diff --git a/404.html b/404.html new file mode 100644 index 00000000..e8dcfa2e --- /dev/null +++ b/404.html @@ -0,0 +1,214 @@ + + + + + Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

404

+

Page Not Found

+

Sorry, this page does not exist.
You can head back to the homepage.

+
+
+ + +
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/README.md b/README.md deleted file mode 100644 index fa0f2a19..00000000 --- a/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# My personal website - -Check it out at [www.zansara.dev](www.zansara.dev) diff --git a/static/_others/2023-11-xx-haystack-chatbots-with-script/conversation-tree.png b/_others/2023-11-xx-haystack-chatbots-with-script/conversation-tree.png similarity index 100% rename from static/_others/2023-11-xx-haystack-chatbots-with-script/conversation-tree.png rename to _others/2023-11-xx-haystack-chatbots-with-script/conversation-tree.png diff --git a/static/_others/2023-11-xx-haystack-chatbots-with-script/cover.png b/_others/2023-11-xx-haystack-chatbots-with-script/cover.png similarity index 100% rename from static/_others/2023-11-xx-haystack-chatbots-with-script/cover.png rename to _others/2023-11-xx-haystack-chatbots-with-script/cover.png diff --git a/static/_others/francobolli_esplosione.jpg b/_others/francobolli_esplosione.jpg similarity index 100% rename from static/_others/francobolli_esplosione.jpg rename to _others/francobolli_esplosione.jpg diff --git a/static/_others/francobolli_rose.jpg b/_others/francobolli_rose.jpg similarity index 100% rename from static/_others/francobolli_rose.jpg rename to _others/francobolli_rose.jpg diff --git a/static/_others/haystack-diagram-query-indexing-pair.png b/_others/haystack-diagram-query-indexing-pair.png similarity index 100% rename from static/_others/haystack-diagram-query-indexing-pair.png rename to _others/haystack-diagram-query-indexing-pair.png diff --git a/static/_others/otiginale.jpg b/_others/otiginale.jpg similarity index 100% rename from static/_others/otiginale.jpg rename to _others/otiginale.jpg diff --git a/about-me/index.html b/about-me/index.html new file mode 100644 index 00000000..79e9573f --- /dev/null +++ b/about-me/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/about/ + + + + + + diff --git a/about/index.html b/about/index.html new file mode 100644 index 00000000..ad52c5df --- /dev/null +++ b/about/index.html @@ -0,0 +1,270 @@ + + + + + + About · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + About + +

+
+ +
+ +
+ +

I am a Python and LLMs specialist currently working for deepset, +a German startup working on NLP since “before it was cool”. +At the moment I’m the #1 contributor +of Haystack, their open-source framework for building highly +customizable, production-ready NLP and LLM applications.

+

Previously I worked at CERN, where I began my software engineering +career. During my time there I had the privilege of driving one +major decision to migrate the graphical +interface software of the accelerator’s control systems from Java to PyQt, +and then of helping a client department migrate to this stack. +I have also worked on other infrastructure and data pipelines, some of which resulted in +publication.

+

Outside of work I have more pet projects than free time to +dedicate to them. +I love science fiction and space exploration, and I enjoy challenging hikes in nature and learning +languages, as much as such a process can be enjoyed.

+

I am a native Italian speaker and fluent in English, but I’ve also learned French during my time at CERN, +I’m studying Hungarian for family reasons, and Portuguese because I currently live there. +I can still understand some Russian and I have a very basic understanding +of Chinese, both from my teenage and university years.

+

You can find my latest CV here. Also check out my +projects, my publications and my talks.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archetypes/default.md b/archetypes/default.md deleted file mode 100644 index 00e77bd7..00000000 --- a/archetypes/default.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "{{ replace .Name "-" " " | title }}" -date: {{ .Date }} -draft: true ---- - diff --git a/config.toml b/config.toml deleted file mode 100644 index 00028471..00000000 --- a/config.toml +++ /dev/null @@ -1,116 +0,0 @@ -baseurl = "https://www.zansara.dev" -title = "Sara Zan" -theme = "hugo-coder" -languagecode = "en" -defaultcontentlanguage = "en" - -paginate = 20 - -[markup.highlight] -style = "github-dark" - -[markup] - [markup.goldmark] - [markup.goldmark.renderer] - unsafe = true - -[params] - navbar = "Home" - author = "Sara Zan" - info = """ - Python and LLMs specialist, - [#1 contributor](https://github.com/deepset-ai/haystack/graphs/contributors) of [Haystack](https://haystack.deepset.ai/), - former [CERN](https://home.cern/) employee. - - I'm also an opinionated sci-fi reader, hiker, tinkerer and somewhat polyglot. Currently busy trying to learn Portuguese and Hungarian at the same time. - """ - description = "Sara Zan's Personal Website" - keywords = "blog,developer,personal,python,llm,nlp,swe,software-engineering,open-source" - avatarurl = "/me/avatar.jpeg" - - #gravatar = "john.doe@example.com" - - faviconSVG = "/favicon.png" - favicon_32 = "/favicon.png" - touchicon = "/favicon.png" - # favicon_16 = "/img/favicon-16x16.png" - - since = 2023 - - enableTwemoji = true - - colorScheme = "auto" - hidecolorschemetoggle = false - -# Social links -[[params.social]] - name = "Github" - icon = "fa fa-github fa-2x" - weight = 1 - url = "https://github.com/ZanSara/" -[[params.social]] - name = "Linkedin" - icon = "fa fa-linkedin fa-2x" - weight = 1 - url = "https://www.linkedin.com/in/sarazanzottera" -[[params.social]] - name = "Twitter" - icon = "fa fa-twitter fa-2x" - weight = 1 - url = "https://twitter.com/zansara_dev" -[[params.social]] - name = "Mastodon" - icon = "fa fa-brands fa-mastodon fa-2x" - weight = 1 - url = "https://mastodon.social/@zansara" -[[params.social]] - name = "Discord" - icon = "fa fa-brands fa-discord fa-2x" - weight = 1 - url = "https://discord.gg/haystack" -[[params.social]] - name = "Stackoverflow" - icon = "fa fa-stack-overflow fa-2x" - weight = 1 - url = "https://stackoverflow.com/users/19108168/zansara" -[[params.social]] - name = "Email" - icon = "fa fa-envelope fa-2x" - weight = 1 - url = "mailto:blog@zanzohegyi.com" -[[params.social]] - name = "RSS" - icon = "fa fa-rss fa-2x" - weight = 3 - url = "/index.xml" - -[taxonomies] - tag = "tags" - series = "series" - -# Menu links -[[menu.main]] - name = "About" - weight = 1 - url = "about" -[[menu.main]] - name = "Posts" - weight = 2 - url = "posts/" -[[menu.main]] - name = "Projects" - weight = 3 - url = "projects/" -[[menu.main]] - name = "Publications" - weight = 4 - url = "publications/" -[[menu.main]] - name = "Talks" - weight = 5 - url = "talks/" -[[menu.main]] - name = "Tags" - weight = 6 - url = "tags/" - diff --git a/contact/index.html b/contact/index.html new file mode 100644 index 00000000..79e9573f --- /dev/null +++ b/contact/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/about/ + + + + + + diff --git a/content/about.md b/content/about.md deleted file mode 100644 index e522f1bf..00000000 --- a/content/about.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "About" -description: "A short introduction" -aliases: ["about-me", 
"zansara", "contact"] -author: "ZanSara" ---- - -I am a Python and LLMs specialist currently working for [deepset](https://www.deepset.ai/), -a German startup working on NLP [since "before it was cool"](https://www.deepset.ai/about). -At the moment I'm the [#1 contributor](https://github.com/deepset-ai/haystack/graphs/contributors) -of [Haystack](https://haystack.deepset.ai/), their open-source framework for building highly -customizable, production-ready NLP and LLM applications. - -Previously I have been working at [CERN](https://home.cern/), where I began my software engineering -career. During my time there I had the privilege of driving one -[major decision](/publications/tucpr03/) to migrate the graphical -interface's software of the accelerator's control systems from Java to PyQt, -and then of helping a client department [migrate](/publications/thpv014/) to this stack. -I have also worked on other infrastructure and data pipelines, some of which resulted in -[publication](publications/thpv042/). - -Outside of work I have too many [pet projects](projects/) to follow up with than the free time I -can dedicate to them. -I love science fiction and space exploration, I enjoy challenging hikes in nature and learning -languages, as much as such process can be enjoyed. - -I speak Italian as a native and fluent English, but I've also learned French during my time at CERN, -I'm studying Hungarian for family reasons, and Portuguese because I currently live there. -I still can understand some Russian and I have a very basic understanding -of Chinese, both from my teenage and university years. - -You can find my latest CV [here](/me/sara_zanzottera_cv.pdf). Check out also my -[projects](/projects), my [publications](/publications) and my [talks](/talks). diff --git a/content/posts/2021-12-11-dotfiles.md b/content/posts/2021-12-11-dotfiles.md deleted file mode 100644 index a139bdce..00000000 --- a/content/posts/2021-12-11-dotfiles.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "My Dotfiles" -date: 2021-12-11 -author: "ZanSara" -tags: [Linux] -featuredImage: "/posts/2021-12-11-dotfiles/cover.png" ---- - -GitHub Repo: https://github.com/ZanSara/dotfiles - ---- - -What Linux developer would I be if I didn't also have my very own dotfiles repo? - -After many years of iterations I finally found a combination that lasted quite a while, so I figured it's time to treat them as a real project. It was originally optimized for my laptop, but then I realized it works quite well on my three-monitor desk setup as well without major issues. - -It sports: -- [i3-wm](https://github.com/Airblader/i3) as window manager (of course, with gaps), -- The typical trio of [polybar](https://github.com/polybar/polybar) , [rofi](https://github.com/davatorium/rofi) and [dunst](https://github.com/dunst-project/dunst) to handle top bar, start menu and notifications respectively, -- The odd choice of [Ly](https://github.com/nullgemm/ly) as my display manager. I just love the minimal, TUI aesthetics of it. Don't forget to enable Doom's flames! -- A minimalistic animated background from [xscreensaver](https://www.jwz.org/xscreensaver/screenshots/), [Grav](https://www.youtube.com/watch?v=spQRFDmDMeg). It's configured to leave no trails and stay black and white. An odd choice, and yet it manages to use no resources, stay very minimal, and bring a very (in my opinion) futuristic look to the entire setup. 
-- [OhMyBash](https://github.com/ohmybash/oh-my-bash) with the [font](https://github.com/ohmybash/oh-my-bash/tree/master/themes/font) theme, -- Other small amenities, like [nmtui](https://docs.rockylinux.org/gemstones/nmtui/) for network management, Japanese numerals as workspace indicators, etc. - -Feel free to take what you like. If you end up using any of these, make sure to share the outcomes! diff --git a/content/posts/2023-09-10-python-verbix-sdk.md b/content/posts/2023-09-10-python-verbix-sdk.md deleted file mode 100644 index f2bbcf6e..00000000 --- a/content/posts/2023-09-10-python-verbix-sdk.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "An (unofficial) Python SDK for Verbix" -date: 2023-09-10 -author: "ZanSara" -tags: [Linux, Languages, Verbix, Conjugations, Flashcards, Python, API, SDK] -featuredImage: "/posts/2023-09-10-python-verbix-sdk/cover.png" ---- - -PyPI package: https://pypi.org/project/verbix-sdk/ - -GitHub Repo: https://github.com/ZanSara/verbix-sdk - -Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md - ---- - -As part of a larger side project which is still in the works, these days I found myself looking for a decent API for verb conjugations in different languages. My requirements were "simple": - -- Supports many languages, including Italian, Portuguese and Hungarian -- Conjugates irregulars properly -- Offers API access to the conjugation tables -- Refuses to conjugate anything except for known verbs -- (Optional) Highlights the irregularities in some way - -Surprisingly, there seems to be a shortage of good alternatives in this field. None of the websites that host polished conjugation data seem to offer API access (looking at you, [Reverso](https://conjugator.reverso.net) -- you'll get your own post one day), and most of the simpler ones use heuristics to conjugate, which makes them very prone to errors. So for now I ended up choosing [Verbix](https://verbix.com) to start from. - -Unfortunately the website doesn't inspire much confidence. I attempted to email the creator just to see them [close their email account](https://verbix.com/contact.html) a while later, an [update to their API](https://api.verbix.com/) seems to have stalled half-way, and the [blog seems dead](https://verb-blog.verbix.com/). I often have the feeling this site might go under any minute, as soon as their domain registration expires. - -But there are pros to it, as long as it lasts. Verbix offers verb conjugation and noun declension tables for some [very niche languages, dialects and conlangs](https://verbix.com/languages/), to a degree that many other popular websites do not even come close to. To support such variety they use heuristics to create the conjugation tables, which is not ideal: for Hungarian, for example, I could easily get it to conjugate [verbs that don't exist](https://verbix.com/webverbix/go.php?T1=meegy&Submit=Go&D1=121&H1=221) or that have spelling mistakes. On the other hand, their API does have a field that says whether the verb is known or not, which is a great way to filter out false positives. - -So I decided to go the extra mile and wrote a small Python SDK for their API: [verbix-sdk](https://pypi.org/project/verbix-sdk/). Enjoy it while it lasts...
diff --git a/content/posts/2023-10-10-haystack-series-intro.md b/content/posts/2023-10-10-haystack-series-intro.md deleted file mode 100644 index c0489564..00000000 --- a/content/posts/2023-10-10-haystack-series-intro.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Haystack 2.0: What is it?" -date: 2023-10-10 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, LLM, NLP, Python, AI] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-10-10-haystack-series-intro/cover.png" ---- - -December is finally approaching, and with it the release of a [Haystack](https://github.com/deepset-ai/haystack) 2.0. At [deepset](https://www.deepset.ai/), we’ve been talking about it for months, we’ve been iterating on the core concepts what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. - -But what is it that makes this release so special? - -In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. For our small team, this is a huge accomplishment. - -In this series, I want to explain what Haystack 2 is from the perspective of the team that developed it. I'm gonna talk about what makes the new Pipeline so different from the old one, how to use new components and features, how these compare with the equivalent in Haystack 1 (when possible) and the principles that led the redesign. I had the pleasure (and sometimes the burden) of being involved in nearly all aspects of this process, from the requirements definition to the release, and I drove many of them through several iterations. In these posts, you can expect a mix of technical details and some diversions on the history and rationale behind each decision, as I’ve seen and understood them. - -For the curious readers, we have already released a lot of information about Haystack 2.0: check out this [this Github Discussion](https://github.com/deepset-ai/haystack/discussions/5568), or join us on [Haystack's Discord server](https://discord.com/invite/VBpFzsgRVF) and peek into the `haystack-2.0` channel for regular updates. We are also slowly building [brand new documentation](https://docs.haystack.deepset.ai/v2.0/docs) for everything, and don’t worry: we’ll make sure to make it as outstanding as the Haystack 1.x version is. - -We also regularly feature 2.0 features in our Office Hours on Discord. Follow [@Haystack_AI](https://twitter.com/Haystack_AI) or [@deepset_ai](https://twitter.com/deepset_ai) on Twitter to stay up-to-date, or [deepset](https://www.linkedin.com/company/deepset-ai) on Linkedin. And you’ll find me and the rest of the team on [GitHub](https://github.com/deepset-ai/haystack) frantically (re)writing code and filing down the rough edges before the big release. - -Stay tuned! - ---- - -*Next: [Why rewriting Haystack?!](/posts/2023-10-11-haystack-series-why)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* - - diff --git a/content/posts/2023-10-11-haystack-series-why.md b/content/posts/2023-10-11-haystack-series-why.md deleted file mode 100644 index c3fd4ffd..00000000 --- a/content/posts/2023-10-11-haystack-series-why.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: "Why rewriting Haystack?!" 
-date: 2023-10-11 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, LLM, NLP, Python, AI] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-10-11-haystack-series-why/cover.png" ---- - -Before even diving into what Haystack 2.0 is, how it was built, and how it works, let's spend a few words about the whats and the whys. - -First of all, *what is* Haystack? - -And next, why on Earth did we decide to rewrite it from the ground up? - -### A Pioneer Framework - -Haystack is a relatively young framework, its initial release dating back to [November 28th, 2019](https://github.com/deepset-ai/haystack/releases/tag/0.1.0). Back then, Natural Language Processing was a field that had just started moving its first step outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. We were proud to enable use cases such as [semantic search](https://medium.com/deepset-ai/what-semantic-search-can-do-for-you-ea5b1e8dfa7f), [FAQ matching](https://medium.com/deepset-ai/semantic-faq-search-with-haystack-6a03b1e13053), document similarity, document summarization, machine translation, language-agnostic search, and so on. - -The field was niche but constantly moving, and research was lively. [The BERT paper](https://arxiv.org/abs/1810.04805) had been published a few months before Haystack's first release, unlocking a small revolution. In the shade of much larger research labs, [deepset](https://www.deepset.ai/), then just a pre-seed stage startup, was also pouring effort into [research](https://arxiv.org/abs/2104.12741) and [model training](https://huggingface.co/deepset). - -In those times, competition was close to non-existent. The field was still quite technical, and most people didn't fully understand its potential. We were free to explore features and use cases at our own pace and set the direction for our product. This allowed us to decide what to work on, what to double down on, and what to deprioritize, postpone, or ignore. Haystack was nurturing its own garden in what was fundamentally a green field. - - -### ChatGPT - -This rather idyllic situation came to an end all too abruptly at the end of November 2022, when [ChatGPT was released](https://openai.com/blog/chatgpt). - -For us in the NLP field, everything seemed to change overnight. Day by day. For *months*. - -The speed of progress went from lively to faster-than-light all at once. Every company with the budget to train an LLM seemed to be doing so, and researchers kept releasing new models just as quickly. Open-source contributors pushed to reduce the hardware requirements for inference lower and lower. My best memory of those times is the drama of [LlaMa's first "release"](https://github.com/facebookresearch/llama/pull/73): I remember betting on March 2nd that within a week I would be running LlaMa models on my laptop, and I wasn't even surprised when my prediction [turned out true](https://news.ycombinator.com/item?id=35100086) with the release of [llama.cpp](https://github.com/ggerganov/llama.cpp) on March 10th. - -Of course, keeping up with this situation was far beyond us. Competitors started to spawn like mushrooms, and our space was quickly crowded with new startups, far more agile and aggressive than us. We suddenly needed to compete and realized we weren't used to it. - -### PromptNode vs FARMReader - -Luckily, Haystack seemed capable of keeping up, at least for a while. 
Thanks to the efforts of [Vladimir Blagojevic](https://twitter.com/vladblagoje), a few weeks after ChatGPT became a sensation, we added some decent support for LLMs in the form of [PromptNode](https://github.com/deepset-ai/haystack/pull/3665). Our SaaS team could soon bring new LLM-powered features to our customers. We even managed to add support for [Agents](https://github.com/deepset-ai/haystack/pull/3925), another hot topic in the wake of ChatGPT. - -However, the go-to library for LLMs was not Haystack in the mind of most developers. It was [LangChain](https://docs.langchain.com/docs/), and for a long time, it seemed like we would never be able to challenge their status and popularity. Everyone was talking about it, everyone was building demos, products, and startups on it, its development speed was unbelievable and, in the day-to-day discourse of the newly born LLM community, Haystack was nowhere to be found. - -Why? - -That's because no one even realized that Haystack, the semantic search framework from 2019, also supported LLMs. All our documentation, tutorials, blog posts, research efforts, models on HuggingFace, *everything* was pointing towards semantic search. LLMs were nowhere to be seen. - -And semantic search was going down *fast*. - -![Reader Models downloads graph](/posts/2023-10-11-haystack-series-why/reader-model-downloads.png) - -The image above shows today's monthly downloads for one of deepset's most successful models on HuggingFace, -[deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2). This model performs [extractive Question Answering](https://huggingface.co/tasks/question-answering), our former primary use case before the release of ChatGPT. Even with more than one and a half million downloads monthly, this model is experiencing a disastrous collapse in popularity, and in the current landscape, it is unlikely to ever recover. - - -### A (Sort Of) Pivot - -In this context, around February 2023, we decided to bet on the rise of LLMs and committed to focus all our efforts towards becoming the #1 framework powering production-grade LLM applications. - -As we quickly realized, this was by far not an easy proposition. Extractive QA was not only ingrained deeply in our public image but in our codebase as well: implementing and maintaining PromptNode was proving more and more painful by the day, and when we tried to fit the concept of Agents into Haystack, it felt uncomfortably like trying to force a square peg into a round hole. - -Haystack pipelines made extractive QA straightforward for the users and were highly optimized for this use case. But supporting LLMs was nothing like enabling extractive QA. Using Haystack for LLMs was quite a painful experience, and at the same time, modifying the Pipeline class to accommodate them seemed like the best way to mess with all the users that relied on the current Pipeline for their existing, value-generating applications. Making mistakes with Pipeline could ruin us. - -With this realization in mind, we took what seemed the best option for the future of Haystack: a rewrite. The knowledge and experience we gained while working on Haystack 1 could fuel the design of Haystack 2 and act as a reference frame for it. Unlike our competitors, we already knew a lot about how to make NLP work at scale. We made many mistakes we would avoid in our next iteration. 
We knew that focusing on the best possible developer experience fueled the growth of Haystack 1 in the early days, and we were committed to doing the same for the next version of it. - -So, the redesign of Haystack started, and it started from the concept of Pipeline. - -### Fast-forward - -Haystack 2.0 hasn't been released yet, but for now, it seems that we have made the right decision at the start of the year. - -Haystack's name is starting to appear more often in discussions around LLMs. The general tone of the community is steadily shifting, and scaling up, rather than experimenting, is now the focus. Competitors are re-orienting themselves toward production-readiness, something we're visibly more experienced with. At the same time, LangChain is becoming a victim of its own success, collecting more and more criticism for its lack of documentation, leaky abstractions, and confusing architecture. Other competitors are gaining steam, but the overall landscape no longer feels as hostile. - -In the next post, I will explore the technical side of Haystack 2.0 and delve deeper into the concept of Pipelines: what they are, how to use them, how they evolved from Haystack 1 to Haystack 2, and why. - ---- - -*Next: [Haystack's Pipeline - A Deep Dive](/posts/2023-10-15-haystack-series-pipeline)* - -*Previous: [Haystack 2.0: What is it?](/posts/2023-10-10-haystack-series-intro)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* \ No newline at end of file diff --git a/content/posts/2023-10-15-haystack-series-pipeline.md b/content/posts/2023-10-15-haystack-series-pipeline.md deleted file mode 100644 index e2e4842e..00000000 --- a/content/posts/2023-10-15-haystack-series-pipeline.md +++ /dev/null @@ -1,437 +0,0 @@ ---- -title: "Haystack's Pipeline - A Deep Dive" -date: 2023-10-15 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, NLP, Python, Pipeline, DAG, graph, "API Design", "Semantic Search", "Hybrid Retrieval"] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-10-15-haystack-series-pipeline/cover.png" ---- -If you've ever looked at Haystack before, you must have come across the [Pipeline](https://docs.haystack.deepset.ai/docs/pipelines), one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? - -In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. This deep dive into the current state of the framework is also a premise for the next episode, where I will explain how Haystack 2.0 addresses this version's shortcomings. - -If you think you already know how Haystack Pipelines work, give this post a chance: I might manage to change your mind. - -## A Bit Of History - -Interestingly, in the very first releases of Haystack, Pipelines were not a thing. Version 0.1.0 was released with a simpler object, the [Finder](https://github.com/deepset-ai/haystack/blob/d2c77f307788899eb562d3cb6e42c69b968b9f2a/haystack/__init__.py#L16), that did little more than gluing together a [Retriever](https://docs.haystack.deepset.ai/docs/retriever) and a [Reader](https://docs.haystack.deepset.ai/docs/reader), the two fundamental building blocks of a [semantic search](https://docs.haystack.deepset.ai/docs/glossary#semantic-search) application. 
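For context, using the Finder looked roughly like the sketch below. This is a reconstruction from memory of the 0.x API, so the exact parameter names may have differed slightly.

```python
# Rough sketch of the pre-Pipeline Haystack 0.x API (names approximate):
# the Finder simply glued a Retriever and a Reader together.
finder = Finder(reader=reader, retriever=retriever)
prediction = finder.get_answers(
    question="What did Einstein work on?",
    top_k_retriever=10,  # how many documents the Retriever fetches
    top_k_reader=5,      # how many answers the Reader extracts
)
```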
- -In the next few months, however, the capabilities of language models expanded to enable many more use cases. One hot topic was [hybrid retrieval](https://haystack.deepset.ai/blog/hybrid-retrieval): a system composed of two different Retrievers, an optional [Ranker](https://docs.haystack.deepset.ai/docs/ranker), and an optional Reader. This kind of application clearly didn't fit the Finder's design, so in [version 0.6.0](https://github.com/deepset-ai/haystack/releases/tag/v0.6.0) the [Pipeline](https://docs.haystack.deepset.ai/docs/pipelines) object was introduced: a new abstraction that helped users build applications as a graph of components. - -Pipeline's API was a huge step forward from Finder. It instantly enabled seemingly endless combinations of components, unlocked almost all use cases conceivable, and became a foundational Haystack concept meant to stay for a very long time. In fact, the API offered by the first version of Pipeline changed very little since its initial release. - -This is the snippet included in the release notes of version 0.6.0 to showcase hybrid retrieval. Does it look familiar? - -```python -p = Pipeline() -p.add_node(component=es_retriever, name="ESRetriever", inputs=["Query"]) -p.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["Query"]) -p.add_node(component=JoinDocuments(join_mode="concatenate"), name="JoinResults", inputs=["ESRetriever", "DPRRetriever"]) -p.add_node(component=reader, name="QAReader", inputs=["JoinResults"]) -res = p.run(query="What did Einstein work on?", top_k_retriever=1) -``` - -## A Powerful Abstraction - -One fascinating aspect of this Pipeline model is the simplicity of its user-facing API. In almost all examples, you see only two or three methods used: - -- `add_node`: to add a component to the graph and connect it to the others. -- `run`: to run the Pipeline from start to finish. -- `draw`: to draw the graph of the Pipeline to an image. - -At this level, users don't need to know what kind of data the components need to function, what they produce, or even what the components *do*: all they need to know is the place they must occupy in the graph for the system to work. - -For example, as long as the users know that their hybrid retrieval pipeline should look more or less like this (note: this is the output of `Pipeline.draw()`), translating it into a Haystack Pipeline object using a few `add_node` calls is mostly straightforward. - -![Hybrid Retrieval](/posts/2023-10-15-haystack-series-pipeline/hybrid-retrieval.png) - -This fact is reflected by the documentation of the various components as well. For example, this is how the documentation page for Ranker opens: - -![Ranker Documentation](/posts/2023-10-15-haystack-series-pipeline/ranker-docs.png) - -Note how the first information about this component is *where to place it*. Right after, it specifies its inputs and outputs, even though it's not immediately clear why we need this information, and then lists which specific classes can cover the role of a Ranker. - -The message is clear: all Ranker classes are functionally interchangeable, and as long as you place them correctly in the Pipeline, they will fulfill the function of Ranker as you expect them to. Users don't need to understand what distinguishes `CohereRanker` from `RecentnessReranker` unless they want to: the documentation promises that you can swap them safely, and thanks to the Pipeline abstraction, this statement mostly holds true. 
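For instance, swapping one Ranker for another is, in principle, a one-line change: only the component instance changes, while the graph stays the same. The snippet below is a sketch based on the hybrid retrieval pipeline shown earlier; the specific Ranker classes and model names are illustrative.

```python
# Sketch: plugging a Ranker between the join node and the Reader of the
# hybrid retrieval pipeline above. Swapping the Ranker class does not
# require touching the rest of the graph.
from haystack.nodes import SentenceTransformersRanker  # or CohereRanker, ...

ranker = SentenceTransformersRanker(model_name_or_path="cross-encoder/ms-marco-MiniLM-L-6-v2")
# ranker = CohereRanker(api_key="...")  # a drop-in replacement in the same position

p.add_node(component=ranker, name="Ranker", inputs=["JoinResults"])
p.add_node(component=reader, name="QAReader", inputs=["Ranker"])
```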
- -## Ready-made Pipelines - -But how can the users know which sort of graph they have to build? - -Most NLP applications are made by a relatively limited number of high-level components: Retriever, Readers, Rankers, plus the occasional Classifier, Translator, or Summarizer. Systems requiring something more than these components used to be really rare, at least when talking about "query" pipelines (more on this later). - -Therefore, at this level of abstraction, there are just a few graph topologies possible. Better yet, they could each be mapped to high-level use cases such as semantic search, language-agnostic document search, hybrid retrieval, and so on. - -But the crucial point is that, in most cases, tailoring the application did not require any changes to the graph's shape. Users only need to identify their use case, find an example or a tutorial defining the shape of the Pipeline they need, and then swap the single components with other instances from the same category until they find the best combination for their exact requirements. - -This workflow was evident and encouraged: it was the philosophy behind Finder as well, and from version 0.6.0, Haystack immediately provided what are called "[Ready-made Pipelines](https://docs.haystack.deepset.ai/docs/ready_made_pipelines)": objects that initialized the graph on the user's behalf, and expected as input the components to place in each point of the graph: for example a Reader and a Retriever, in case of simple Extractive QA. - -With this further abstraction on top of Pipeline, creating an NLP application became an action that doesn't even require the user to be aware of the existence of the graph. In fact: - -```python -pipeline = ExtractiveQAPipeline(reader, retriever) -``` - -is enough to get your Extractive QA applications ready to answer your questions. And you can do so with just another line. - -```python -answers = pipeline.run(query="What did Einstein work on?") -``` - -## "Flexibility powered by DAGs" - -This abstraction is extremely powerful for the use cases that it was designed for. There are a few layers of ease of use vs. customization the user can choose from depending on their expertise, which help them progress from a simple ready-made Pipeline to fully custom graphs. - -However, the focus was oriented so much on the initial stages of the user's journey that power-users' needs were sometimes forgotten. Such issues didn't show immediately, but quickly added friction as soon as the users tried to customize their system beyond the examples from the tutorials and the documentation. - -For an example of these issues, let's talk about pipelines with branches. Here are two small, apparently very similar pipelines. - -![Query Classification vs Hybrid Retrieval](/posts/2023-10-15-haystack-series-pipeline/branching-query-pipelines.png) - -The first Pipeline represents the Hybrid Retrieval use case we've met with before. Here, the Query node sends its outputs to both retrievers, and they both produce some output. For the Reader to make sense of this data, we need a Join node that merges the two lists into one and a Ranker that takes the lists and sorts them again by similarity to the query. Ranker then sends the rearranged list to the Reader. - -The second Pipeline instead performs a simpler form of Hybrid Retrieval. Here, the Query node sends its outputs to a Query Classifier, which then triggers only one of the two retrievers, the one that is expected to perform better on it. 
The triggered Retriever then sends its output directly to the Reader, which doesn't need to know which Retriever the data comes from. So, in this case, we don't need the Join node. - -The two pipelines are built as you would expect, with a bunch of `add_node` calls. You can even run them with the same identical code, which is the same code needed for every other Pipeline we've seen so far. - -```python -pipeline_1 = Pipeline() -pipeline_1.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Query"]) -pipeline_1.add_node(component=dense_retriever, name="DenseRetriever", inputs=["Query"]) -pipeline_1.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"]) -pipeline_1.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"]) -pipeline_1.add_node(component=reader, name="Reader", inputs=["SparseRetriever", "DenseRetriever"]) - -answers = pipeline_1.run(query="What did Einstein work on?") -``` -```python -pipeline_2 = Pipeline() -pipeline_2.add_node(component=query_classifier, name="QueryClassifier", inputs=["Query"]) -pipeline_2.add_node(component=sparse_retriever, name="DPRRetriever", inputs=["QueryClassifier"]) -pipeline_2.add_node(component=dense_retriever, name="ESRetriever", inputs=["QueryClassifier"]) -pipeline_2.add_node(component=reader, name="Reader", inputs=["SparseRetriever", "DenseRetriever"]) - -answers = pipeline_2.run(query="What did Einstein work on?") -``` - -Both pipelines run as you would expect them to. Hooray! Pipelines can branch and join! - -Now, let's take the first Pipeline and customize it further. - -For example, imagine we want to expand language support to include French. The dense Retriever has no issues handling several languages as long as we select a multilingual model; however, the sparse Retriever needs the keywords to match, so we must translate the queries to English to find some relevant documents in our English-only knowledge base. - -Here is what the Pipeline ends up looking like. Language Classifier sends all French queries over `output_1` and all English queries over `output_2`. In this way, the query passes through the Translator node only if it is written in French. - -![Multilingual Hybrid Retrieval](/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval.png) - -```python -pipeline = Pipeline() -pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"]) -pipeline.add_node(component=translator, name="Translator", inputs=["LanguageClassifier.output_1"]) -pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Translator", "LanguageClassifier.output_2"]) -pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["LanguageClassifier.output_1", "LanguageClassifier.output_2"]) -pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"]) -pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"]) -pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"]) -``` - -But... wait. Let's look again at the graph and at the code. DenseRetriever should receive *two* inputs from Language Classifier: both `output_1` and `output_2`, because it can handle both languages. What's going on? Is this a bug in `draw()`? 
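One way to find out is to rerun the pipeline with debugging enabled and inspect what each node actually received. A sketch is below; the French query is just an example, and the exact structure of the debug payload is assumed.

```python
# Sketch (Haystack 1.x): rerun with debug=True to record what every node
# received and produced during the execution.
result = pipeline.run(query="Sur quoi Einstein a-t-il travaillé ?", debug=True)
print(result["_debug"])  # per-node record of inputs and outputs (structure assumed)
```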
- -Thanks to the `debug=True` parameter of `Pipeline.run()`, we start inspecting what each node saw during the execution, and we realize quickly that our worst fears are true: this is a bug in the Pipeline implementation. The underlying library powering the Pipeline's graphs takes the definition of Directed Acyclic Graphs very seriously and does not allow two nodes to be connected by more than one edge. There are, of course, other graph classes supporting this case, but Haystack happens to use the wrong one. - -Interestingly, Pipeline doesn't even notice the problem and does not fail. It runs as the drawing suggests: when the query happens to be in French, only the sparse Retriever will process it. - -Clearly, this is not good for us. - -Well, let's look for a workaround. Given that we're Haystack power users by now, we realize that we can use a Join node with a single input as a "no-op" node. If we put it along one of the edges, that edge won't directly connect Language Classifier and Dense Retriever, so the bug should be solved. - -So here is our current Pipeline: - -![Multilingual Hybrid Retrieval with No-Op Joiner](/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-with-noop.png) - -```python -pipeline = Pipeline() -pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"]) -pipeline.add_node(component=translator, name="Translator", inputs=["LanguageClassifier.output_1"]) -pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Translator", "LanguageClassifier.output_2"]) -pipeline.add_node(component=no_op_join, name="NoOpJoin", inputs=["LanguageClassifier.output_1"]) -pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["NoOpJoin", "LanguageClassifier.output_2"]) -pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"]) -pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"]) -pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"]) -``` - -Great news: the Pipeline now runs as we expect! However, when we run a French query, the results are better but still surprisingly bad. - -What now? Is the dense Retriever still not running? Is the Translation node doing a poor job? - -Some debugging later, we realize that the Translator is amazingly good and the Retrievers are both running. But we forgot another piece of the puzzle: Ranker needs the query to be in the same language as the documents. It requires the English version of the query, just like the sparse Retriever does. However, right now, it receives the original French query, and that's the reason for the lack of performance. We soon realize that this is very important also for the Reader. - -So... how does the Pipeline pass the query down to the Ranker? - -Until this point, we didn't need to know how exactly values are passed from one component to the next. We didn't need to care about their inputs and outputs at all: Pipeline was doing all this dirty work for us. Suddenly, we need to tell the Pipeline which query to pass to the Ranker and we have no idea how to do that. - -Worse yet. There is *no way* to reliably do that. The documentation seems to blissfully ignore the topic, docstrings give us no pointers, and looking at [the routing code of Pipeline](https://github.com/deepset-ai/haystack/blob/aaee03aee87e96acd8791b9eff999055a8203237/haystack/pipelines/base.py#L483) we quickly get dizzy and cut the chase. 
We dig through the Pipeline API several times until we're confident that there's nothing that can help. - -Well, there must be at least some workaround. Maybe we can forget about this issue by rearranging the nodes. - -One easy way out is to translate the query for both retrievers instead of only for the sparse one. This solution also eliminates the NoOpJoin node we introduced earlier, so it doesn't sound too bad. - -The Pipeline looks like this now. - -![Multilingual Hybrid Retrieval with two Translators](/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-two-translators.png) - -```python -pipeline = Pipeline() -pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"]) -pipeline.add_node(component=translator, name="Translator", inputs=["LanguageClassifier.output_1"]) -pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Translator", "LanguageClassifier.output_2"]) -pipeline.add_node(component=translator_2, name="Translator2", inputs=["LanguageClassifier.output_1"]) -pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["Translator2", "LanguageClassifier.output_2"]) -pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"]) -pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"]) -pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"]) -``` - -We now have two nodes that contain identical translator components. Given that they are stateless, we can surely place the same instance in both places, with different names, and avoid doubling its memory footprint just to work around a couple of Pipeline bugs. After all, Translator nodes use relatively heavy models for machine translation. - -This is what Pipeline replies as soon as we try. - -``` -PipelineConfigError: Cannot add node 'Translator2'. You have already added the same -instance to the Pipeline under the name 'Translator'. -``` - -Okay, so it seems like we can't re-use components in two places: there is an explicit check against this, for some reason. Alright, let's rearrange *again* this Pipeline with this new constraint in mind. - -How about we first translate the query and then distribute it? - -![Multilingual Hybrid Retrieval, translate-and-distribute](/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-translate-and-distribute.png) - -```python -pipeline = Pipeline() -pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"]) -pipeline.add_node(component=translator, name="Translator", inputs=["LanguageClassifier.output_1"]) -pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Translator", "LanguageClassifier.output_2"]) -pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["Translator", "LanguageClassifier.output_2"]) -pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"]) -pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"]) -pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"]) -``` - -Looks neat: there is no way now for the original French query to reach Ranker now. Right? - -We run the pipeline again and soon realize that nothing has changed. The query received by Ranker is still in French, untranslated. 
Shuffling the order of the `add_node` calls and the names of the components in the `inputs` parameters seems to have no effect on the graph. We even try to connect Translator directly with Ranker in a desperate attempt to forward the correct value, but Pipeline now starts throwing obscure, apparently meaningless error messages like: - -``` -BaseRanker.run() missing 1 required positional argument: 'documents' -``` - -Isn't Ranker receiving the documents from JoinDocuments? Where did they go? - -Having wasted far too much time on this relatively simple Pipeline, we throw the towel, go to Haystack's Discord server, and ask for help. - -Soon enough, one of the maintainers shows up and promises a workaround ASAP. You're skeptical at this point, but the workaround, in fact, exists. - -It's just not very pretty. - -![Multilingual Hybrid Retrieval, working version](/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-workaround.png) - -```python -pipeline = Pipeline() -pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"]) -pipeline.add_node(component=translator_workaround, name="TranslatorWorkaround", inputs=["LanguageClassifier.output_2"]) -pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["LanguageClassifier.output_1", "TranslatorWorkaround"]) -pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["LanguageClassifier.output_1", "TranslatorWorkaround"]) -pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"]) -pipeline.add_node(component=join_query_workaround, name="JoinQueryWorkaround", inputs=["TranslatorWorkaround", "JoinDocuments"]) -pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinQueryWorkaround"]) -pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"]) -``` - -Note that you need two custom nodes: a wrapper for the Translator and a brand-new Join node. - -```python -class TranslatorWorkaround(TransformersTranslator): - - outgoing_edges = 1 - - def run(self, query): - results, edge = super().run(query=query) - return {**results, "documents": [] }, "output_1" - - def run_batch(self, queries): - pass - - -class JoinQueryWorkaround(JoinNode): - - def run_accumulated(self, inputs, *args, **kwargs): - return {"query": inputs[0].get("query", None), "documents": inputs[1].get("documents", None)}, "output_1" - - def run_batch_accumulated(self, inputs): - pass - -``` - -Along with this beautiful code, we also receive an explanation about how the `JoinQueryWorkaround` node works only for this specific Pipeline and is pretty hard to generalize, which is why it's not present in Haystack right now. I'll spare you the details: you will have an idea why by the end of this journey. - -Wanna play with this Pipeline yourself and try to make it work in another way? Check out the [Colab](https://drive.google.com/file/d/18Gqfd0O828T71Gc-IHeU4v7OXwaPk7Fc/view?usp=sharing) or the [gist](https://gist.github.com/ZanSara/33020a980f2f535e2529df4ca4e8f08a) and have fun. - -Having learned only that it's better not to implement unusual branching patterns with Haystack unless you're ready for a fight, let's now turn to the indexing side of your application. We'll stick to the basics this time. - -## Indexing Pipelines - -Indexing pipelines' main goal is to transform files into Documents from which a query pipeline can later retrieve information. They mostly look like the following. 
- -![Indexing Pipeline](/posts/2023-10-15-haystack-series-pipeline/indexing-pipeline.png) - -And the code looks just like how you would expect it. - -```python -pipeline = Pipeline() -pipeline.add_node(component=file_type_classifier, name="FileTypeClassifier", inputs=["File"]) -pipeline.add_node(component=text_converter, name="TextConverter", inputs=["FileTypeClassifier.output_1"]) -pipeline.add_node(component=pdf_converter, name="PdfConverter", inputs=["FileTypeClassifier.output_2"]) -pipeline.add_node(component=docx_converter, name="DocxConverter", inputs=["FileTypeClassifier.output_4"]) -pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["TextConverter", "PdfConverter", "DocxConverter"]) -pipeline.add_node(component=preprocessor, name="Preprocessor", inputs=["JoinDocuments"]) -pipeline.add_node(component=document_store, name="DocumentStore", inputs=["Preprocessor"]) - -pipeline.run(file_paths=paths) -``` -There is no surprising stuff here. The starting node is File instead of Query, which seems logical given that this Pipeline expects a list of files, not a query. There is a document store at the end which we didn't use in query pipelines so far, but it's not looking too strange. It's all quite intuitive. - -Indexing pipelines are run by giving them the paths of the files to convert. In this scenario, more than one Converter may run, so we place a Join node before the PreProcessor to make sense of the merge. We make sure that the directory contains only files that we can convert, in this case, .txt, .pdf, and .docx, and then we run the code above. - -The code, however, fails. - -``` -ValueError: Multiple non-default file types are not allowed at once. -``` - -The more we look at the error, the less it makes sense. What are non-default file types? Why are they not allowed at once, and what can I do to fix that? - -We head for the documentation, where we find a lead. - -![`FileTypeClassifier documentation`](/posts/2023-10-15-haystack-series-pipeline/filetypeclassifier-docs.png) - -So it seems like the File Classifier can only process the files if they're all of the same type. - -After all we've been through with the Hybrid Retrieval pipelines, this sounds wrong. We know that Pipeline can run two branches at the same time. We've been doing it all the time just a moment ago. Why can't FileTypeClassifier send data to two converters just like LanguageClassifier sends data to two retrievers? - -Turns out, this is *not* the same thing. - -Let's compare the three pipelines and try to spot the difference. - -![All branching pipelines, side by side](/posts/2023-10-15-haystack-series-pipeline/all-branching-pipelines.png) - -In the first case, Query sends the same identical value to both Retrievers. So, from the component's perspective, there's a single output being produced: the Pipeline takes care of copying it for all nodes connected to it. - -In the second case, QueryClassifier can send the query to either Retriever but never to both. So, the component can produce two different outputs, but at every run, it will always return just one. - -In the third case, FileTypeClassifier may need to produce two different outputs simultaneously: for example, one with a list of text files and one with a list of PDFs. And it turns out this can't be done. This is, unfortunately, a well-known limitation of the Pipeline/BaseComponent API design. 
-The output of a component is defined as a tuple, `(output_values, output_edge)`, and nodes can't produce a list of these tuples to send different values to different nodes. - -That's the end of the story. This time, there is no workaround. You must pass the files individually or forget about using a Pipeline for this task. - -## Validation - -On top of these challenges, other tradeoffs had to be taken for the API to look so simple at first impact. One of these is connection validation. - -Let's imagine we quickly skimmed through a tutorial and got one bit of information wrong: we mistakenly believe that in an Extractive QA Pipeline, you need to place a Reader in front of a Retriever. So we sit down and write this. - -```python -p = Pipeline() -p.add_node(component=reader, name="Reader", inputs=["Query"]) -p.add_node(component=retriever, name="Retriever", inputs=["Reader"]) -``` - -Up to this point, running the script raises no error. Haystack is happy to connect these two components in this order. You can even `draw()` this Pipeline just fine. - -![Swapper Retriever/Reader Pipeline](/posts/2023-10-15-haystack-series-pipeline/swapped-retriever-reader.png) - -Alright, so what happens when we run it? - -```python -res = p.run(query="What did Einstein work on?") -``` -``` -BaseReader.run() missing 1 required positional argument: 'documents' -``` - -This is the same error we've seen in the translating hybrid retrieval pipeline earlier, but fear not! Here, we can follow the suggestion of the error message by doing: - -```python -res = p.run(query="What did Einstein work on?", documents=document_store.get_all_documents()) -``` - -And to our surprise, this Pipeline doesn't crash. It just hangs there, showing an insanely slow progress bar, telling us that some inference is in progress. A few hours later, we kill the process and consider switching to another framework because this one is clearly very slow. - -What happened? - -The cause of this issue is the same that makes connecting Haystack components in a Pipeline so effortless, and it's related to the way components and Pipeline communicate. If you check `Pipeline.run()`'s signature, you'll see that it looks like this: - - -```python -def run( - self, - query: Optional[str] = None, - file_paths: Optional[List[str]] = None, - labels: Optional[MultiLabel] = None, - documents: Optional[List[Document]] = None, - meta: Optional[Union[dict, List[dict]]] = None, - params: Optional[dict] = None, - debug: Optional[bool] = None, -): -``` - -which mirrors the `BaseComponent.run()` signature, the base class nodes have to inherit from. - -```python -@abstractmethod -def run( - self, - query: Optional[str] = None, - file_paths: Optional[List[str]] = None, - labels: Optional[MultiLabel] = None, - documents: Optional[List[Document]] = None, - meta: Optional[dict] = None, -) -> Tuple[Dict, str]: -``` - -This match means a few things: - -- Every component can be connected to every other because their inputs are identical. - -- Every component can only output the same variables received as input. - -- It's impossible to tell if it makes sense to connect two components because their inputs and outputs always match. 
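To make the shared-signature problem concrete, here is a minimal sketch of a custom Haystack 1.x node. The class is illustrative, not part of Haystack: note how `run()` has to accept the whole common signature even though it only uses `documents`.

```python
from haystack.nodes.base import BaseComponent

class SimpleRanker(BaseComponent):
    outgoing_edges = 1

    def run(self, query=None, file_paths=None, labels=None, documents=None, meta=None):
        # Only `documents` is actually used: the other arguments exist purely
        # to satisfy the shared BaseComponent.run() signature.
        ranked = sorted(documents or [], key=lambda doc: doc.score or 0.0, reverse=True)
        return {"documents": ranked}, "output_1"

    def run_batch(self, queries=None, documents=None, **kwargs):
        pass
```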
- -Take this with a grain of salt: the actual implementation is far more nuanced than what I just showed you, but the problem is fundamentally this: components are trying to be as compatible as possible with all others and they have no way to signal, to the Pipeline or to the users, that they're meant to be connected only to some nodes and not to others. - -In addition to this problem, to respect the shared signature, components often take inputs that they don't use. A Ranker only needs documents, so all the other inputs required by the run method signature go unused. What do components do with the values? It depends: - -- Some have them in the signature and forward them unchanged. -- Some have them in the signature and don't forward them. -- Some don't have them in the signature, breaking the inheritance pattern, and Pipeline reacts by assuming that they should be added unchanged to the output dictionary. - -If you check closely the two workaround nodes for the Hybrid Retrieval pipeline we tried to build before, you'll notice the fix entirely focuses on altering the routing of the unused parameters `query` and `documents` to make the Pipeline behave the way the user expects. However, this behavior does not generalize: a different pipeline would require another behavior, which is why the components behave differently in the first place. - - -## Wrapping up - -I could go on for ages talking about the shortcomings of complex Pipelines, but I'd rather stop here. - -Along this journey into the guts of Haystack Pipelines, we've seen at the same time some beautiful APIs and the ugly consequences of their implementation. As always, there's no free lunch: trying to over-simplify the interface will bite back as soon as the use cases become nontrivial. - -However, we believe that this concept has a huge potential and that this version of Pipeline can be improved a lot before the impact on the API becomes too heavy. In Haystack 2.0, armed with the experience we gained working with this implementation of Pipeline, we reimplemented it in a fundamentally different way, which will prevent many of these issues. - -In the next post, we're going to see how. - ---- - -*Next: [Canals: a new concept of Pipeline](/posts/2023-10-26-haystack-series-canals)* - -*Previous: [Why rewriting Haystack?!](/posts/2023-10-11-haystack-series-why)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* \ No newline at end of file diff --git a/content/posts/2023-10-26-haystack-series-canals.md b/content/posts/2023-10-26-haystack-series-canals.md deleted file mode 100644 index de9b3a17..00000000 --- a/content/posts/2023-10-26-haystack-series-canals.md +++ /dev/null @@ -1,417 +0,0 @@ ---- -title: "Canals: a new concept of Pipeline" -date: 2023-10-26 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, Python, Canals, Pipeline, DAG, "API Design"] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-10-26-haystack-series-canals/cover.png" ---- - -As we have seen in [the previous episode of this series](/posts/2023-10-15-haystack-series-pipeline), Haystack's Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. - -What does this mean in practice? Let's look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1.x counterparts, and the pros and cons of this new paradigm. 
- -## New Use Cases - -I've already written [at length](/posts/2023-10-15-haystack-series-pipeline) about what made the original Pipeline concept so powerful and its weaknesses. Pipelines were overly effective for the use cases we could conceive while developing them, but they didn't generalize well on unforeseen situations. - -For a long time Haystack could afford not to focus on use cases that didn't fit its architecture, as I have mentioned in my [previous post](/posts/2023-10-11-haystack-series-why) about the reasons for the rewrite. The pipeline was then more than sufficient for its purposes. - -However, the situation flipped as LLMs and Generative AI entered the scene abruptly at the end of 2022. Pipeline seemingly overfit its existing use cases, fossilized on them, and could not cope with the requirements set by the new landscape of the field. - -Let's take one of these use cases and see where it leads us. - -## RAG Pipelines - -Let's take one typical example: [retrieval augmented generation](https://www.deepset.ai/blog/llms-retrieval-augmentation), or RAG for short. This technique has been used since the very early days of the Generative AI boom as an easy way to strongly [reduce hallucinations](https://haystack.deepset.ai/blog/generative-vs-extractive-models) and improve the alignment of LLMs. The basic idea is: instead of asking directly a question, such as `"What's the capital of France?"`, we send to the model a more complex prompt, that includes both the question and the answer. Such a prompt might be: - -```text -Given the following paragraph, answer the question. - -Paragraph: France is a unitary semi-presidential republic with its capital in Paris, -the country's largest city and main cultural and commercial centre; other major urban -areas include Marseille, Lyon, Toulouse, Lille, Bordeaux, Strasbourg and Nice. - -Question: What's the capital of France? - -Answer: -``` - -In this situation, the task of the LLM becomes far easier: instead of drawing facts from its internal knowledge, which might be lacking, inaccurate, or out-of-date, the model only needs to rephrase the paragraph's content to answer the question, improving the model's performance significantly. - -We now have a new problem, though. How can we provide the correct snippets of text to the LLM? This is where the "retrieval" keyword comes up. - -One of Haystack's primary use cases has been [Extractive Question Answering](https://huggingface.co/tasks/question-answering): a system where a Retriever component searches into a Document Store (such as a vector or SQL database) for snippets of text that are the most relevant to a given question. It then sends such snippets to a Reader, which highlights the keywords that answer the original question. - -By replacing a Reader model with an LLM, we get a Retrieval Augmented Generation Pipeline. Easy! - -![Generative vs Extractive QA Pipeline Graph](/posts/2023-10-26-haystack-series-canals/gen-vs-ext-qa-pipeline.png) - -So far, everything checks out. Supporting RAG with Haystack feels not only possible but natural. Let's take this simple example one step forward: what if, instead of getting the data from a document store, I want to retrieve data from the Internet? - -## Web RAG - -At first impact, the task may not seem daunting. We surely need a special Retriever that, instead of searching through a DB, searches through the Internet using a search engine. But the core concepts stay the same, and so, we assume, should the pipeline's graph. 
The end result should be something like this: - -![Initial Web RAG Pipeline Graph](/posts/2023-10-26-haystack-series-canals/initial-web-rag-pipeline.png) - -However, the problem doesn't end there. Search engines return links, which need to be accessed, and the content of the webpage downloaded. Such pages may be extensive and contain artifacts, so the resulting text needs to be cleaned, reduced into paragraphs, potentially embedded by a retrieval model, ranked against the original query, and only the top few resulting pieces of text need to be passed over to the LLM. Just by including these minimal requirements, our pipeline already looks like this: - -![Linear Web RAG Pipeline Graph](/posts/2023-10-26-haystack-series-canals/linear-web-rag-pipeline.png) - -And we still need to consider that URLs may reference not HTML pages but PDFs, videos, zip files, and so on. We need file converters, zip extractors, audio transcribers, and so on. - -![Multiple File Type Web RAG Pipeline Graph](/posts/2023-10-26-haystack-series-canals/multifile-web-rag-pipeline.png) - -You may notice how this use case moved quickly from looking like a simple query pipeline into a strange overlap of a query and an indexing pipeline. As we've learned in the previous post, indexing pipelines have their own set of quirks, one of which is that they can't simultaneously process files of different types. But we can only expect the Search Engine to retrieve HTML files or PDFs if we filter them out on purpose, which makes the pipeline less effective. In fact, a pipeline that can read content from different file types, such as the one above, can't really be made to work. - -And what if, on top of this, we need to cache the resulting documents to reduce latency? What if I wanted to get the results from Google's page 2, but only if the content of page 1 did not answer our question? At this point, the pipeline is hard to imagine, let alone draw. - -Although Web RAG is somewhat possible in Haystack, it stretches far beyond what the pipeline was designed to handle. Can we do better? - -## Pinpointing the issue - -When we went back to the drawing board to address these concerns, the first step was pinpointing the issue. - -The root problem, as we realized, is that Haystack Pipelines treats each component as a locomotive treats its wagons. They all look the same from the pipeline's perspective, they can all be connected in any order, and they all go from A to B rolling over the same pair of rails, passing all through the same stations. - -![Cargo Train](/posts/2023-10-26-haystack-series-canals/train.png) - -In Haystack 1, components are designed to serve the pipeline's needs first. A good component is identical to all the others, provides the exact interface the pipeline requires, and can be connected to any other in any order. The components are awkward to use outside of a pipeline due to the same `run()` method that makes the pipeline so ergonomic. Why does the Ranker, which needs only a query and a list of Documents to operate, also accept `file_paths` and `meta` in its `run()` method? It does so uniquely to satisfy the pipeline's requirements, which in turn only exist to make all components forcefully compatible with each other. - -Just like a locomotive, the pipeline pushes the components over the input data one by one. When seen in this light, it's painfully obvious why the indexing pipeline we've seen earlier can't work: the "pipeline train" can only go on one branch at a time. Component trains can't split mid-execution. 
They are designed to all see the same data all the time. Even when branching happens, all branches always see the same data. Sending different wagons onto different rails is not possible by design. - -## Breaking it down - -The issue's core is more evident when seen in this light. The pipeline is the only object that drives the execution, while components tend to be as passive and uniform as possible. This approach doesn't scale: components are fundamentally different, and asking them to all appear equal forces them to hide their differences, making bugs and odd behavior more likely. As the number of components to handle grows, their variety will increase regardless, so the pipeline must always be aware of all the possibilities to manage them and progressively add edge cases that rapidly increase its complexity. - -Therefore, the pipeline rewrite for Haystack 2.0 focused on one core principle: the components will define and drive the execution process. There is no locomotive anymore: every component needs to find its way, such as grabbing the data they need from the producers and sending their results to whoever needs them by declaring the proper connections. In the railway metaphor, it's like adding a steering wheel to each container: the result is a truck, and the resulting system looks now like a highway. - -![Highway](/posts/2023-10-26-haystack-series-canals/highway.png) - -Just as railways are excellent at going from A to B when you only need to take a few well-known routes and never another, highways are unbeatable at reaching every possible destination with the same effort, even though they need a driver for each wagon. A "highway" Pipeline requires more work from the Components' side, but it frees them to go wherever they need to with a precision that a "railway" pipeline cannot accomplish. - -## Canals - -The code of this new, more powerful Pipeline object found its way into its dedicated library, [Canals](https://github.com/deepset-ai/canals). By design, Canals is not geared toward specific NLP use cases, but it's a minimal, generic [ETL](https://en.wikipedia.org/wiki/Extract,_transform,_load)-like Pipeline library written purely in Python. - -Canals brings two core elements to the table: - -- The `Component` protocol, a well-defined API that Python classes need to respect to be understood by the pipeline. - -- The `Pipeline` object, the graph resolver and execution engine that also performs validation and provides a few utilities on top. - -Let's explore these two concepts one by one. - -{{< notice info >}} *All these code snippets were tested against the [main branch](https://github.com/deepset-ai/canals) of Canals. Version `0.10.0` should contain all the features highlighted in this post and you will be soon able to install it with `pip install canals==0.10.0`.* {{< /notice >}} - -## The Pipeline API - -The `Pipeline` object may remind vaguely of Haystack's original pipeline, and using one should feel familiar. For example, this is how you assemble a simple Canals Pipeline that performs a few additions. 
```python
from canals import Pipeline
from sample_components import AddFixedValue

# Create the Pipeline object
pipeline = Pipeline()

# Add the components - note the missing `inputs` parameter
pipeline.add_component("add_one", AddFixedValue(add=1))
pipeline.add_component("add_two", AddFixedValue(add=2))

# Connect them together
pipeline.connect("add_one.result", "add_two.value")

# Draw the pipeline
pipeline.draw("two_additions_pipeline.png")

# Run the pipeline
results = pipeline.run({"add_one": {"value": 1}})

print(results)
# prints '{"add_two": {"result": 4}}'
```

Creating the pipeline requires no special attention; however, you can now pass a `max_loops_allowed` parameter to limit looping when it's a risk. By contrast, old Haystack Pipelines did not support loops at all.

Next, components are added by calling the `Pipeline.add_component(name, component)` method. This method is subject to limitations very similar to those of the previous `pipeline.add_node`:

- Every component needs a unique name.
- Some names are reserved (for now, only `_debug`).
- Instances are not reusable.
- The object needs to be a component.

However, we no longer connect the components to each other using this function because, although it is possible to implement in principle, it feels more awkward to use in the case of loops.

Consequently, we introduced a new method, `Pipeline.connect()`. This method follows the syntax `("producer_component.output_name", "consumer_component.input_name")`: we don't simply line up two components one after the other, but we connect one of their outputs to one of their inputs in an explicit manner.

This change allows Canals to perform a much more careful validation of such connections. As we will discover soon, Canals components must declare the type of their inputs and outputs. In this way, Canals can not only make sure that the inputs and outputs exist for the given component, but it can also check whether their types match and explain connection failures in great detail. For example, if there were a type mismatch, `Pipeline.connect()` would fail with an error such as:

```markdown
Cannot connect 'greeter.greeting' with 'add_two.value': their declared input and output
types do not match.

greeter:
- greeting: str
add_two:
- value: int (available)
- add: Optional[int] (available)
```

Once the components are connected together, the resulting pipeline can be drawn. Canals pipeline drawings show far more details than their predecessors because the components are forced to share much more information about what they need to run, the types of these variables, and so on. The pipeline above draws the following image:

![A Pipeline making two additions](/posts/2023-10-26-haystack-series-canals/two_additions_pipeline.png)

You can see how the component classes, their inputs and outputs, and all the connections are named and typed.

So, how do you run such a pipeline? By providing a dictionary of input values. Each starting component should have a small dictionary with all the necessary inputs. In the example above, we pass `1` to the `value` input of `add_one`. The results mirror the input's structure: `add_two` is at the end of the pipeline, so the pipeline returns a dictionary where, under the `add_two` key, there is another dictionary: `{"result": 4}`.

By looking at the diagram, you may have noticed that these two components have optional inputs.
They're not necessary for the pipeline to run, but they can be used to dynamically control the behavior of these components. In this case, `add` controls the "fixed value" this component adds to its primary input. For example: - -```python -pipeline.run({"add_one": {"value": 1, "add": 2}}) -# returns '{"add_two": {"result": 5}}' -``` - -```python -pipeline.run({"add_one": {"value": 1}, "add_two": {"add": 10}}) -# returns '{"add_two": {"result": 12}}' -``` - -One evident difficulty of this API is that it might be challenging to understand what to provide to the run method for each component. This issue has also been considered: the pipeline offers a `Pipeline.inputs()` method that returns a structured representation of all the expected input. For our pipeline, it looks like: - -```python -{ - "add_one": { - "value": { - "type": int, - "is_optional": False - }, - "add": { - "type": typing.Optional[int], - "is_optional": True - } - }, - "add_two": { - "add": { - "type": typing.Optional[int], - "is_optional": True - } - } -} -``` - - -## The Component API - -Now that we covered the Pipeline's API, let's have a look at what it takes for a Python class to be treated as a Canals' Component. - -You are going to need: - -- **A `@component` decorator**. All component classes must be decorated with the `@component` decorator. This allows Canals to discover and validate them. - -- **A `run()` method**. This is the method where the main functionality of the component should be carried out. It's invoked by `Pipeline.run()` and has a few constraints, which we will describe later. - -- Optionally, **a `warm_up()` method**. It can be used to defer the loading of a heavy resource (think a local LLM or an embedding model) to the warm-up stage that occurs right before the first execution of the pipeline. Components that use `warm_up()` can be added to a Pipeline and connected before the heavy operations are carried out. In this way, the validation that Canals performs at that stage can happen before resources are wasted. - -To summarize, a minimal Canals component can look like this: - -```python -from canals import component - -@component -class Double: - - @component.output_types(result=int) - def run(self, value: int): - return {"result": value * 2} -``` - -Note how the `run()` method has a few peculiar features. One is that all the method parameters need to be typed: if `value` was not declared as `value: int`, Canals would raise an exception demanding for typing. - -This is the way components declare to the pipeline which inputs they expect and of which type: this is the first half of the information needed to perform the validation that `Pipeline.connect()` carries out. - -The other half of the information comes from the `@component.output_types` decorator. Canals demands that components declare how many outputs the component will produce and which type. One may ask why not rely on typing for the outputs, just as we've done for the inputs. So why not simply declare components as: - - -```python -@component -class Double: - - def run(self, value: int) -> int: - return value * 2 -``` - -For `Double`, this is a legitimate solution. However, let's make an example with another component called `CheckParity`: if a component's input value is even, it sends it unchanged over the `even` output, while if it's odd, it will send it over the `odd` output. The following clearly doesn't work: we're not communicating anywhere to Canals which output is even and which one is odd. 
```python
@component
class CheckParity:

    def run(self, value: int) -> int:
        if value % 2 == 0:
            return value
        return value
```

How about this instead?

```python
from typing import Dict

@component
class CheckParity:

    def run(self, value: int) -> Dict[str, int]:
        if value % 2 == 0:
            return {"even": value}
        return {"odd": value}
```

This approach carries all the information required. However, such information is only available after the `run()` method is called. Unless we parse the method to discover all return statements and their keys (which is not always possible), Canals cannot know all the keys the return dictionary may have. So, it can't validate the connections when `Pipeline.connect()` is called.

The decorator bridges the gap by allowing the class to declare in advance what outputs it will produce and of which type. Pipeline trusts this information to be correct and validates the connections accordingly.

Okay, but what if the component is very dynamic? The output type may depend on the input type. Perhaps the number of inputs depends on some initialization parameter. In these cases, Canals allows components to declare their input and output types in their `__init__` method, like this:

```python
@component
class HighlyDynamicComponent:

    def __init__(self, ...):
        component.set_input_types(self, input_name=input_type, ...)
        component.set_output_types(self, output_name=output_type, ...)

    def run(self, **kwargs):
        ...
```

Note that there's no more typing on `run()`, and the decorator is gone. The information provided in the init method is sufficient for Canals to validate the connections.

One more feature of the input and output declarations relates to optional and variadic values. Canals supports both through a mix of type checking and signature inspection. For example, let's have a look at what the `AddFixedValue` component we've seen earlier looks like:

```python
from typing import Optional
from canals import component


@component
class AddFixedValue:
    """
    Adds two values together.
    """

    def __init__(self, add: int = 1):
        self.add = add

    @component.output_types(result=int)
    def run(self, value: int, add: Optional[int] = None):
        """
        Adds two values together.
        """
        if add is None:
            add = self.add
        return {"result": value + add}
```

You can see that `add`, the optional parameter we met before, has a default value. Adding a default value to a parameter in the `run()` signature tells Canals that the parameter itself is optional, so the component can run even if that specific input doesn't receive any value from the pipeline's input or other components.

Another component that generalizes the sum operation is `Sum`, which instead looks like this:

```python
from canals import component
from canals.component.types import Variadic

@component
class Sum:
    """
    Adds all its inputs together.
    """

    @component.output_types(total=int)
    def run(self, values: Variadic[int]):
        """
        :param values: the values to sum
        """
        return {"total": sum(v for v in values if v is not None)}
```

In this case, we used the special Canals type `Variadic` to tell Canals that the `values` input can receive data from multiple producers, instead of just one. Therefore, `values` is going to be a list type, but it can be connected to single `int` outputs, making it a valuable aggregator.

## Serialization

Just like old Haystack Pipelines, Canals pipelines can be serialized.
However, this feature suffered from similar problems plaguing the execution model, so it was changed radically. - -The original pipeline gathered intrusive information about each of its components when initialized, leveraging the shared `BaseComponent` class. Conversely, Canal's Pipelines delegate the serialization process entirely to its components. - -In Canals, if a component wishes to be serializable, it must provide two additional methods, `to_dict` and `from_dict`, which perform serialization and deserialization to a dictionary. The pipeline limits itself to calling each of its component's methods, collecting their output, grouping them together with some limited extra information (such as the connections between them), and returning the result. - -For example, if `AddFixedValue` were serializable, its serialized version could look like this: - -```python -{ - "type": "AddFixedValue", - "init_parameters": { - "add": 1 - } -} -``` - -The entire pipeline we used above would end up as follows: - -```python -{ - "max_loops_allowed": 100, - "components": { - "add_one": { - "type": "AddFixedValue", - "init_parameters": { - "add": 1 - } - }, - "add_two": { - "type": "AddFixedValue", - "init_parameters": { - "add": 2 - } - } - }, - "connections": [ - { - "sender": "add_one.result", - "receiver": "add_two.value", - } - ] -} -``` - -Notice how the components are free to perform serialization in the way they see fit. The only requirement imposed by Canals is the presence of two top-level keys, `type` and `init_parameters`, which are necessary for the pipeline to deserialize each component into the correct class. - -This is useful, especially if the component's state includes some non-trivial values, such as objects, API keys, or other special values. Pipeline no longer needs to know how to serialize everything the Components may contain: the task is fully delegated to them, which always knows best what needs to be done. - -## But... do we need any of this? - -Having done a tour of Canals features, one might have noticed one detail. Pipelines now are a bit harder to use than before: you can't just chain every component after every other. There are connections to be made, validation to perform, graphs to assemble, and so on. - -In exchange, the pipeline is now more powerful than before. Sure, but so is a plain Python script. Do we *really* need the Pipeline object? And what do we need it for? - -ETL frameworks often include an abstraction over the execution flow to make the same high-level system execute over different infrastructures, primarily for scalability and speed. They may leverage the abstraction to transparently distribute nodes on different machines, run them in parallel, increase throughput by adding replicas and other similar operations. - -For now, Canals doesn't provide anything of this kind. While we don't exclude that in the future, this abstraction may serve this purpose, there are a few other benefits that the pipeline is providing us right now: - -- **Validation**. While components normally validate their inputs and outputs, the pipeline does all the validation before the components run, even before loading heavy resources. This makes the whole system far less likely to fail at runtime for a simple input/output mismatch, which can be priceless for complex applications. - -- **Serialization**. Redistributing code is always tricky: redistributing a JSON file is much safer. 
Pipelines make it possible to represent complex systems in a readable JSON file that can be edited, shared, stored, deployed, and re-deployed on different backends at need. - -- **Drawing**: Canals offers a way to see your system clearly and automatically, which is often very handy for debugging, inspecting the system, and collaborating on the pipeline's design. - -- On top of this, the pipeline abstraction promotes flatter API surfaces by discouraging components nesting one within the other and providing easy-to-use, single-responsibility components that are easy to reason about. - -Having said all of this, however, we don't believe that the pipeline design makes Haystack win or lose. Pipelines are just a bonus on top of what provides the real value: a broad set of components that reliably perform well-defined tasks. That's why the Component API does not make the `run()` method awkward to use outside of a Pipeline: calling `Sum.run(values=[1, 2, 3])` feels Pythonic outside of a pipeline and always will. - -In the following posts, I will explore the world of Haystack components, starting from our now familiar use cases: RAG Pipelines. - ---- - -*Next: [RAG Pipelines from scratch](/posts/2023-10-27-haystack-series-rag)* - -*Previous: [Haystack's Pipeline](/posts/2023-10-13-haystack-series-pipeline)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* \ No newline at end of file diff --git a/content/posts/2023-10-27-haystack-series-rag.md b/content/posts/2023-10-27-haystack-series-rag.md deleted file mode 100644 index 44d1f162..00000000 --- a/content/posts/2023-10-27-haystack-series-rag.md +++ /dev/null @@ -1,436 +0,0 @@ ---- -title: "RAG Pipelines from scratch" -date: 2023-10-27 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, NLP, Python, LLM, GPT, "Retrieval Augmentation", RAG, "Semantic Search"] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-10-27-haystack-series-rag/cover.png" ---- - -*Last updated: 21/11/2023* - -Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today's NLP landscape, Haystack must enable it. - -Let's see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. At that point, the knowledge of the LLM will be only limited by the content of our data store, and all of this can be accomplished without fine-tuning language models. - -{{< notice info >}} - -💡 *I recently gave a talk about RAG applications in Haystack 2.0, so if you prefer videos to blog posts, you can find the recording [here](https://zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/). Keep in mind that the code might be slightly outdated.* - -{{< /notice >}} - -## What is RAG? - -The idea of Retrieval Augmented Generation was first defined in a [paper](https://arxiv.org/abs/2005.11401) by Meta in 2020. It was designed to solve a few of the inherent limitations of seq2seq models (language models that, given a sentence, can finish writing it for you), such as: - -- Their internal knowledge, as vast as it may be, will always be limited and at least slightly out of date. 
-- They work best on generic topics rather than niche and specific areas unless they're fine-tuned on purpose, which is a costly and slow process. -- All models, even those with subject-matter expertise, tend to "hallucinate": they confidently produce false statements backed by apparently solid reasoning. -- They cannot reliably cite their sources or tell where their knowledge comes from, which makes fact-checking their replies nontrivial. - -RAG solves these issues of "grounding" the LLM to reality by providing some relevant, up-to-date, and trusted information to the model together with the question. In this way, the LLM doesn't need to draw information from its internal knowledge, but it can base its replies on the snippets provided by the user. - -![RAG Paper diagram](/posts/2023-10-27-haystack-series-rag/rag-paper-image.png "A visual representation of RAG from the original paper") - -As you can see in the image above (taken directly from the original paper), a system such as RAG is made of two parts: one that finds text snippets that are relevant to the question asked by the user and a generative model, usually an LLM, that rephrases the snippets into a coherent answer for the question. - -Let's build one of these with Haystack 2.0! - -{{< notice info >}} - -💡 *Do you want to see this code in action? Check out the Colab notebook [here](https://colab.research.google.com/drive/1FkDNS3hTO4oPXHFbXQcldls0kf-KTq-r?usp=sharing) or the gist [here](https://gist.github.com/ZanSara/0af1c2ac6c71d0a723c179cc6ec1ac41)*. - -{{< /notice >}} - -{{< notice warning >}} - -⚠️ **Warning:** *This code was tested on `haystack-ai==0.149.0`. Haystack 2.0 is still unstable, so later versions might introduce breaking changes without notice until Haystack 2.0 is officially released. The concepts and components however stay the same.* - -{{< /notice >}} - -## Generators: Haystack's LLM components - -As every NLP framework that deserves its name, Haystack supports LLMs in different ways. The easiest way to query an LLM in Haystack 2.0 is through a Generator component: depending on which LLM and how you intend to query it (chat, text completion, etc...), you should pick the appropriate class. - -We're going to use `gpt-3.5-turbo` (the model behind ChatGPT) for these examples, so the component we need is [`GPTGenerator`](https://github.com/deepset-ai/haystack/blob/main/haystack/preview/components/generators/openai.py). Here is all the code required to use it to query OpenAI's `gpt-3.5-turbo` : - -```python -from haystack.preview.components.generators import GPTGenerator - -generator = GPTGenerator(api_key=api_key) -generator.run(prompt="What's the official language of France?") -# returns {"replies": ['The official language of France is French.']} -``` -You can select your favorite OpenAI model by specifying a `model_name` at initialization, for example, `gpt-4`. It also supports setting an `api_base_url` for private deployments, a `streaming_callback` if you want to see the output generated live in the terminal, and optional `kwargs` to let you pass whatever other parameter the model understands, such as the number of answers (`n`), the temperature (`temperature`), etc. - -Note that in this case, we're passing the API key to the component's constructor. This is unnecessary: `GPTGenerator` can read the value from the `OPENAI_API_KEY` environment variable and also from the `api_key` module variable of [`openai`'s SDK](https://github.com/openai/openai-python#usage). 
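For instance, a minimal sketch combining the environment variable with a custom model could look like the following. It assumes the parameter name exactly as described above (`model_name`) and that `OPENAI_API_KEY` is already set; names and defaults may differ in later Haystack 2.0 releases.

```python
import os
from haystack.preview.components.generators import GPTGenerator

# No api_key argument: the component picks it up from the environment instead.
assert "OPENAI_API_KEY" in os.environ, "export OPENAI_API_KEY before running this"

generator = GPTGenerator(model_name="gpt-4")  # select the model by name, as described above
result = generator.run(prompt="What's the official language of France?")
print(result["replies"][0])
# 'The official language of France is French.'
```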
- -Right now, Haystack supports HuggingFace models through the [`HuggingFaceLocalGenerator`](https://github.com/deepset-ai/haystack/blob/main/haystack/preview/components/generators/hugging_face_local.py) and [`HuggingFaceTGIGenerator`](https://github.com/deepset-ai/haystack/blob/main/haystack/preview/components/generators/hugging_face_tgi.py) components, and many more LLMs are coming soon. - - -## PromptBuilder: structured prompts from templates - -Let's imagine that our LLM-powered application also comes with some pre-defined questions that the user can select instead of typing in full. For example, instead of asking them to type `What's the official language of France?`, we let them select `Tell me the official languages` from a list, and they simply need to type "France" (or "Wakanda" for a change - our chatbot needs some challenges too). - -In this scenario, we have two pieces of the prompt: a variable (the country name, like "France") and a prompt template, which in this case is `"What's the official language of {{ country }}?"` - -Haystack offers a component that can render variables into prompt templates: it's called [`PromptBuilder`](https://github.com/deepset-ai/haystack/blob/main/haystack/preview/components/builders/prompt_builder.py). As the generators we've seen before, also `PromptBuilder` is nearly trivial to initialize and use. - -```python -from haystack.preview.components.builders.prompt_builder import PromptBuilder - -prompt_builder = PromptBuilder(template="What's the official language of {{ country }}?") -prompt_builder.run(country="France") -# returns {'prompt': "What's the official language of France?"} -``` - -Note how we defined a variable, `country`, by wrapping its name in double curly brackets. PromptBuilder lets you define any input variable that way: if the prompt template was `"What's the official language of {{ nation }}?"`, the `run()` method of `PromptBuilder` would have expected a `nation` input. - -This syntax comes from [Jinja2](https://jinja.palletsprojects.com/en/3.0.x/intro/), a popular templating library for Python. If you have ever used Flask, Django, or Ansible, you will feel at home with `PromptBuilder`. Instead, if you never heard of any of these libraries, you can check out the [syntax](https://jinja.palletsprojects.com/en/3.0.x/templates/) on Jinja's documentation. Jinja has a powerful templating language and offers way more features than you'll ever need in prompt templates, ranging from simple if statements and for loops to object access through dot notation, nesting of templates, variables manipulation, macros, full-fledged import and encapsulation of templates, and more. - -## A Simple Generative Pipeline - -With these two components, we can assemble a minimal pipeline to see how they work together. Connecting them is trivial: `PromptBuilder` generates a `prompt` output, and `GPTGenerator` expects an input with the same name and type. 
```python
from haystack.preview import Pipeline
from haystack.preview.components.generators import GPTGenerator
from haystack.preview.components.builders.prompt_builder import PromptBuilder

pipe = Pipeline()
pipe.add_component("prompt_builder", PromptBuilder(template="What's the official language of {{ country }}?"))
pipe.add_component("llm", GPTGenerator(api_key=api_key))
pipe.connect("prompt_builder", "llm")

pipe.run({"prompt_builder": {"country": "France"}})
# returns {"llm": {"replies": ['The official language of France is French.'] }}
```

Here is the pipeline graph:

![Simple LLM pipeline](/posts/2023-10-27-haystack-series-rag/simple-llm-pipeline.png)

## Make the LLM cheat

Building the generative part of a RAG application was very simple! So far, we only provided the question to the LLM, but no information to base its answers on. Nowadays, LLMs possess a lot of general knowledge, so questions about famous countries such as France or Germany are easy for them to reply to correctly. However, when using an app about world countries, some users may be interested in knowing more about obscure or defunct microstates that don't exist anymore. In this case, ChatGPT is unlikely to provide the correct answer without any help.

For example, let's ask our pipeline something *really* obscure.

```python
pipe.run({"prompt_builder": {"country": "the Republic of Rose Island"}})
# returns {
#     "llm": {
#         "replies": [
#             'The official language of the Republic of Rose Island was Italian.'
#         ]
#     }
# }
```

The answer is an educated guess but is not accurate: although it was located just outside of Italy's territorial waters, according to [Wikipedia](https://en.wikipedia.org/wiki/Republic_of_Rose_Island) the official language of this short-lived micronation was Esperanto.

How can we get ChatGPT to reply to such a question correctly? One way is to make it "cheat" by providing the answer as part of the question. In fact, `PromptBuilder` is designed to serve precisely this use case.

Here is our new, more advanced prompt:

```text
Given the following information, answer the question.
Context: {{ context }}
Question: {{ question }}
```

Let's build a new pipeline using this prompt!

```python
context_template = """
Given the following information, answer the question.
Context: {{ context }}
Question: {{ question }}
"""
language_template = "What's the official language of {{ country }}?"

pipe = Pipeline()
pipe.add_component("context_prompt", PromptBuilder(template=context_template))
pipe.add_component("language_prompt", PromptBuilder(template=language_template))
pipe.add_component("llm", GPTGenerator(api_key=api_key))
pipe.connect("language_prompt", "context_prompt.question")
pipe.connect("context_prompt", "llm")

pipe.run({
    "context_prompt": {"context": "Rose Island had its own government, currency, post office, and commercial establishments, and the official language was Esperanto."},
    "language_prompt": {"country": "the Republic of Rose Island"}
})
# returns {
#     "llm": {
#         "replies": [
#             'The official language of the Republic of Rose Island is Esperanto.'
#         ]
#     }
# }
```

Let's look at the graph of our Pipeline:

![Double PromptBuilder pipeline](/posts/2023-10-27-haystack-series-rag/double-promptbuilder-pipeline.png)

The beauty of `PromptBuilder` lies in its flexibility.
It allows users to chain instances together to assemble complex prompts from simpler schemas: for example, we used the output of the first `PromptBuilder` as the value of `question` in the second prompt. - -However, in this specific scenario, we can build a simpler system by merging the two prompts into one. - -```text -Given the following information, answer the question. -Context: {{ context }} -Question: What's the official language of {{ country }}? -``` - -Using this new prompt, the resulting pipeline becomes again very similar to our first. - -```python -template = """ -Given the following information, answer the question. -Context: {{ context }} -Question: What's the official language of {{ country }}? -""" -pipe = Pipeline() -pipe.add_component("prompt_builder", PromptBuilder(template=template)) -pipe.add_component("llm", GPTGenerator(api_key=api_key)) -pipe.connect("prompt_builder", "llm") - -pipe.run({ - "prompt_builder": { - "context": "Rose Island had its own government, currency, post office, and commercial establishments, and the official language was Esperanto.", - "country": "the Republic of Rose Island" - } -}) -# returns { -# "llm": { -# "replies": [ -# 'The official language of the Republic of Rose Island is Esperanto.' -# ] -# } -# } -``` - -![PromptBuilder with two inputs pipeline](/posts/2023-10-27-haystack-series-rag/double-variable-promptbuilder-pipeline.png) - - -## Retrieving the context - -For now, we've been playing with prompts, but the fundamental question remains unanswered: where do we get the correct text snippet for the question the user is asking? We can't expect such information as part of the input: we need our system to be able to fetch this information independently, based uniquely on the query. - -Thankfully, retrieving relevant information from large [corpora](https://en.wikipedia.org/wiki/Text_corpus) (a technical term for extensive collections of data, usually text) is a task that Haystack excels at since its inception: the components that perform this task are called [Retrievers](https://docs.haystack.deepset.ai/v2.0/docs/retrievers). - -Retrieval can be performed on different data sources: to begin, let's assume we're searching for data in a local database, which is the use case that most Retrievers are geared towards. - -Let's create a small local database to store information about some European countries. Haystack offers a neat object for these small-scale demos: `InMemoryDocumentStore`. This document store is little more than a Python dictionary under the hood but provides the same exact API as much more powerful data stores and vector stores, such as [Elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/tree/main/document_stores/elasticsearch) or [ChromaDB](https://haystack.deepset.ai/integrations/chroma-documentstore). Keep in mind that the object is called "Document Store" and not simply "datastore" because what it stores is Haystack's Document objects: a small dataclass that helps other components make sense of the data that they receive. - -So, let's initialize an `InMemoryDocumentStore` and write some `Documents` into it. 
- -```python -from haystack.preview.dataclasses import Document -from haystack.preview.document_stores import InMemoryDocumentStore - -documents = [ - Document(content="German is the the official language of Germany."), - Document(content="The capital of France is Paris, and its official language is French."), - Document(content="Italy recognizes a few official languages, but the most widespread one is Italian."), - Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea.") -] -docstore = InMemoryDocumentStore() -docstore.write_documents(documents=documents) - -docstore.filter_documents() -# returns [ -# Document(content="German is the the official language of Germany."), -# Document(content="The capital of France is Paris, and its official language is French."), -# Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea."), -# Document(content="Italy recognizes a few official languages, but the most widespread one is Italian."), -# ] -``` - -Once the document store is set up, we can initialize a retriever. In Haystack 2.0, each document store comes with its own set of highly optimized retrievers: `InMemoryDocumentStore` offers two, one based on BM25 ranking and one based on embedding similarity. - -Let's start with the BM25-based retriever, which is slightly easier to set up. Let's first use it in isolation to see how it behaves. - -```python -from haystack.preview.components.retrievers import InMemoryBM25Retriever - -retriever = InMemoryBM25Retriever(document_store=docstore) -retriever.run(query="Rose Island", top_k=1) -# returns [ -# Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea.") -# ] - -retriever.run(query="Rose Island", top_k=3) -# returns [ -# Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea.") -# Document(content="Italy recognizes a few official languages, but the most widespread one is Italian."), -# Document(content="The capital of France is Paris, and its official language is French."), -# ] -``` - -We see that [`InMemoryBM25Retriever`](https://docs.haystack.deepset.ai/v2.0/reference/retriever-api#inmemorybm25retriever) accepts a few parameters. `query` is the question we want to find relevant documents for. In the case of BM25, the algorithm only searches for exact word matches. The resulting retriever is very fast, but it doesn't fail gracefully: it can't handle spelling mistakes, synonyms, or descriptions of an entity. For example, documents containing the word "cat" would be considered irrelevant against a query such as "felines". - -`top_k` controls the number of documents returned. We can see that in the first example, only one document is returned, the correct one. In the second, where `top_k = 3`, the retriever is forced to return three documents even if just one is relevant, so it picks the other two randomly. 
Although the behavior is not optimal, BM25 guarantees that if there is a document that is relevant to the query, it will be in the first position, so for now, we can use it with `top_k=1`.

Retrievers also accept a `filters` parameter, which lets you pre-filter the documents before retrieval. This is a powerful technique that comes in handy in complex applications, but for now we have no use for it. I will talk about this topic, called metadata filtering, in more detail in a later post.

Let's now make use of this new component in our Pipeline.

## Our first RAG Pipeline

The retriever does not return a single string but a list of Documents. How do we put the content of these objects into our prompt template?

It's time to use Jinja's powerful syntax to do some unpacking on our behalf.

```text
Given the following information, answer the question.

Context:
{% for document in documents %}
    {{ document.content }}
{% endfor %}

Question: What's the official language of {{ country }}?
```

Notice how, despite the slightly alien syntax for a Python programmer, what the template does is reasonably evident: it iterates over the documents and, for each of them, renders their `content` field.

With all these pieces set up, we can finally put them all together.

```python
template = """
Given the following information, answer the question.

Context:
{% for document in documents %}
    {{ document.content }}
{% endfor %}

Question: What's the official language of {{ country }}?
"""
pipe = Pipeline()

pipe.add_component("retriever", InMemoryBM25Retriever(document_store=docstore))
pipe.add_component("prompt_builder", PromptBuilder(template=template))
pipe.add_component("llm", GPTGenerator(api_key=api_key))
pipe.connect("retriever", "prompt_builder.documents")
pipe.connect("prompt_builder", "llm")

country = "the Republic of Rose Island"
pipe.run({
    "retriever": {"query": country},
    "prompt_builder": {"country": country}
})
# returns {
#     "llm": {
#         "replies": [
#             'The official language of the Republic of Rose Island is Esperanto.'
#         ]
#     }
# }
```

![BM25 RAG Pipeline](/posts/2023-10-27-haystack-series-rag/bm25-rag-pipeline.png)

Congratulations! We've just built our first, true-to-its-name RAG Pipeline.

## Scaling up: Elasticsearch

So, we now have our running prototype. What does it take to scale this system up for production workloads?

Of course, scaling up a system to production readiness is no simple task that can be addressed in a paragraph. Still, we can start this journey with one component that can readily be improved: the document store.

`InMemoryDocumentStore` is clearly a toy implementation: Haystack supports much more performant document stores such as [Elasticsearch](https://haystack.deepset.ai/integrations/elasticsearch-document-store), [ChromaDB](https://haystack.deepset.ai/integrations/chroma-documentstore) and [Marqo](https://haystack.deepset.ai/integrations/marqo-document-store). Since we have built our app with a BM25 retriever, let's select Elasticsearch as our production-ready document store of choice.

How do we use Elasticsearch in our pipeline? All it takes is to swap out `InMemoryDocumentStore` and `InMemoryBM25Retriever` with their Elasticsearch counterparts, which offer nearly identical APIs.

First, let's create the document store: we will need a slightly more complex setup to connect to the Elasticsearch backend. In this example, we use Elasticsearch version 8.8.0, but every Elasticsearch 8 version should work.
- -```python -from elasticsearch_haystack.document_store import ElasticsearchDocumentStore - -host = os.environ.get("ELASTICSEARCH_HOST", "https://localhost:9200") -user = "elastic" -pwd = os.environ["ELASTICSEARCH_PASSWORD"] # You need to provide this value - -docstore = ElasticsearchDocumentStore( - hosts=[host], - basic_auth=(user, pwd), - ca_certs="/content/elasticsearch-8.8.0/config/certs/http_ca.crt" -) -``` - -Now, let's write again our four documents into the store. In this case, we specify the duplicate policy, so if the documents were already present, they would be overwritten. All Haystack document stores offer three policies to handle duplicates: `FAIL` (the default), `SKIP`, and `OVERWRITE`. - -```python -from haystack.preview.document_stores import DuplicatePolicy -documents = [ - Document(content="German is the the official language of Germany."), - Document(content="The capital of France is Paris, and its official language is French."), - Document(content="Italy recognizes a few official languages, but the most widespread one is Italian."), - Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea.") -] -docstore.write_documents(documents=documents, policy=DuplicatePolicy.OVERWRITE) -``` - -Once this is done, we are ready to build the same pipeline as before, but using `ElasticsearchBM25Retriever`. - -```python -from elasticsearch_haystack.bm25_retriever import ElasticsearchBM25Retriever - -template = """ -Given the following information, answer the question. - -Context: -{% for document in documents %} - {{ document.content }} -{% endfor %} - -Question: What's the official language of {{ country }}? -""" - -pipe = Pipeline() -pipe.add_component("retriever", ElasticsearchBM25Retriever(document_store=docstore)) -pipe.add_component("prompt_builder", PromptBuilder(template=template)) -pipe.add_component("llm", GPTGenerator(api_key=api_key)) -pipe.connect("retriever", "prompt_builder.documents") -pipe.connect("prompt_builder", "llm") - -pipe.draw("elasticsearch-rag-pipeline.png") - -country = "the Republic of Rose Island" -pipe.run({ - "retriever": {"query": country}, - "prompt_builder": {"country": country} -}) -# returns { -# "llm": { -# "replies": [ -# 'The official language of the Republic of Rose Island is Esperanto.' -# ] -# } -# } -``` - -![Elasticsearch RAG Pipeline](/posts/2023-10-27-haystack-series-rag/elasticsearch-rag-pipeline.png) - -That's it! We're now running the same pipeline over a production-ready Elasticsearch instance. - -## Wrapping up - -In this post, we've detailed some fundamental components that make RAG applications possible with Haystack: Generators, the PromptBuilder, and Retrievers. We've seen how they can all be used in isolation and how you can make Pipelines out of them to achieve the same goal. Last, we've experimented with some of the (very early!) features that make Haystack 2.0 production-ready and easy to scale up from a simple demo with minimal changes. - -However, this is just the start of our journey into RAG. Stay tuned! 
- ---- - -*Next: [Indexing data for RAG applications](/posts/2023-11-05-haystack-series-minimal-indexing)* - -*Previous: [Canals: a new concept of Pipeline](/posts/2023-10-26-haystack-series-canals)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* - -*Cover image from [Wikipedia](https://it.wikipedia.org/wiki/File:Isoladellerose.jpg)* diff --git a/content/posts/2023-11-05-haystack-series-minimal-indexing.md b/content/posts/2023-11-05-haystack-series-minimal-indexing.md deleted file mode 100644 index 52bd81d1..00000000 --- a/content/posts/2023-11-05-haystack-series-minimal-indexing.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -title: "Indexing data for RAG applications" -date: 2023-11-05 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, NLP, Python, LLM, "Retrieval Augmentation", RAG, "indexing", "Document Store"] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-11-05-haystack-series-minimal-indexing/cover.png" ---- - -In the [previous post](/posts/2023-10-27-haystack-series-rag) of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn't extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? - -In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. - -{{< notice info >}} - -💡 *Do you want to see the code in action? Check out the [Colab notebook](https://colab.research.google.com/drive/155CtcumiK5w3wX6FWyM1dG3OqnhwnCqy?usp=sharing) or the [gist](https://gist.github.com/ZanSara/ba7efd241c61ccfd12ed48195e23bb34).* - -{{< /notice >}} - -{{< notice warning >}} - -⚠️ **Warning:** *This code was tested on `haystack-ai==0.117.0`. Haystack 2.0 is still unstable, so later versions might introduce breaking changes without notice until Haystack 2.0 is officially released. The concepts and components, however, stay the same.* - -{{< /notice >}} - - -# The task - -In Haystack's terminology, the process of extracting information from a group of files and storing the data in a document store is called "indexing". The process includes, at the very minimum, reading the content of a file, generating a Document object containing all its text, and then storing it in a document store. - -However, indexing pipelines often do more than this. They can process more than one file type, like .txt, .pdf, .docx, .html, audio, video, and images. Having many file types to convert, they route each file to the proper converter based on its type. Files tend to contain way more text than a normal LLM can chew, so they need to split those huge Documents into smaller chunks. Also, the converters are not perfect at reading text from the files, so they need to clean the data from artifacts such as page numbers, headers, footers, and so on. On top of all of this, if you plan to use a retriever that is based on embedding similarity, your indexing pipeline will also need to embed all documents before writing them into the store. - -Sounds like a lot of work! - -In this post, we will focus on the preprocessing part of the pipeline: cleaning, splitting, and writing documents. I will talk about the other functionalities of indexing pipelines, such as document embedding and multiple file types routing, in later posts. 
- -# Converting files - -As we've just seen, the most important task of this pipeline is to convert files into Documents. Haystack provides several converters for this task: at the time of writing, it supports: - -- Raw text files (`TextFileToDocument`) -- HTML files, so web pages in general (`HTMLToDocument`) -- PDF files, by extracting text natively (`PyPDFToDocument`) -- Image files, PDFs with images, and Office files with images, by OCR (`AzureOCRDocumentConverter`) -- Audio files, doing transcription with Whisper either locally (`LocalWhisperTranscriber`) or remotely using OpenAI's hosted models (`RemoteWhisperTranscriber`) -- A ton of [other formats](https://tika.apache.org/2.9.1/formats.html), such as Microsoft's Office formats, thanks to [Apache Tika](https://tika.apache.org/) (`TikaDocumentConverter`) - -For this example, let's assume we have a collection of web pages downloaded from the Internet. These pages are our only source of information and contain all we want our RAG application to know about. - -In this case, our converter of choice is `HTMLToDocument`. `HTMLToDocument` is a Haystack component that understands HTML and can filter all the markup away, leaving only meaningful text. Remember that this is a file converter, not a URL fetcher: it can only process local files, such as a website crawl. Haystack provides some components to fetch web pages, but we will see them later. - -Here is how you can use this converter: - -```python -from haystack.preview.components.file_converters.html import HTMLToDocument - -path = "Republic_of_Rose_Island.html" - -converter = HTMLToDocument() -converter.run(sources=[path]) - -# returns {"documents": [Document(content="The Republic of Rose Isla...")]} -``` - -`HTMLToDocument` is a straightforward component that offers close to no parameters to customize its behavior. Of its API, one notable feature is its input type: this converter can take paths to local files in the form of strings or `Path` objects, but it also accepts `ByteStream` objects. - -`ByteStream` is a handy Haystack abstraction that makes handling binary streams easier. If a component accepts `ByteStream` as input, you don't necessarily have to save your web pages to file before passing them to this converter. This allows components that retrieve large files from the Internet to pipe their output directly into this component without saving the data to disk first, which can save a lot of time. - -# Cleaning the text - -With `HTMLToDocument`, we can convert whole web pages into large Document objects. The converter typically does a decent job of filtering out the markup. Still, it's not always perfect. To compensate for these occasional issues, Haystack offers a component called `DocumentCleaner` that can remove noise from the text of the documents. - -Just like any other component, `DocumentCleaner` is straightforward to use: - -```python -from haystack.preview.components.preprocessors.document_cleaner import DocumentCleaner - -cleaner = DocumentCleaner() -cleaner.run(documents=documents) -# returns {"documents": [Document(content=...), Document(content=...), ...]} -``` - -The effectiveness of `DocumentCleaner` depends a lot on the type of converter you use. Some flags, such as `remove_empty_lines` and `remove_extra_whitespace`, are minor fixes that can come in handy but usually have little impact on the quality of the results when used in a RAG pipeline. They can, however, make a vast difference for Extractive QA pipelines. 
- -Other parameters, like `remove_substrings` or `remove_regex`, work very well but need manual inspection and iteration from a human to get right. For example, for Wikipedia pages, we could use these parameters to remove all instances of the word `"Wikipedia"`, which are undoubtedly many and irrelevant. - -Finally, `remove_repeated_substrings` is a convenient method that removes headers and footers from long text, for example, books and articles. However, it works only for PDFs and, to a limited degree, for text files because it relies on the presence of form feed characters (`\f`), which are rarely present in web pages. - -# Splitting the text - -Now that the text is cleaned up, we can move onto a more exciting step: text splitting. - -So far, each Document stored the content of an entire file. If a file was a whole book with hundreds of pages, a single Document would contain hundreds of thousands of words, which is clearly too much for an LLM to make sense of. Such a large Document is also challenging for Retrievers to understand because it contains so much text that it looks relevant to every possible question. To populate our document store with data that can be used effectively by a RAG pipeline, we need to chunk this data into much smaller Documents. - -That's where `TextDocumentSplitter` comes into play. - -{{< notice info >}} - -💡 *With LLMs in a race to offer the [largest context window](https://magic.dev/blog/ltm-1) and research showing that such a chase is [counterproductive](https://arxiv.org/abs/2307.03172), there is no general consensus about how splitting Documents for RAG impacts the LLM's performance.* - -*What you need to keep in mind is that splitting implies a tradeoff. Huge documents will always be slightly relevant for every question, but they will bring a lot of context, which may or may not confuse the model. On the other hand, tiny Documents are much more likely to be retrieved only for questions they're highly relevant for, but they might provide too little context for the LLM to really understand their meaning.* - -*Tweaking the size of your Documents for the specific LLM you're using and the topic of your documents is one way to optimize your RAG pipeline, so be ready to experiment with different Document sizes before committing to one.* - -{{< /notice >}} - -How is it used? - -```python -from haystack.preview.components.preprocessors.text_document_splitter import TextDocumentSplitter - -text_splitter = TextDocumentSplitter(split_by="sentence", split_length=5) -text_splitter.run(documents=documents) - -# returns {"documents": [Document(content=...), Document(content=...), ...]} -``` - -`TextDocumentSplitter` lets you configure the approximate size of the chunks you want to generate with three parameters: `split_by`, `split_length`, and `split_overlap`. - -`split_by` defines the unit to use when splitting some text. For now, the options are `word`, `sentence`, and `passage` (paragraph), but we will soon add other options. - -`split_length` is the number of the units defined above each document should include. For example, if the unit is `sentence`, `split_length=10` means that all your Documents will contain 10 sentences worth of text (except usually for the last document, which may have less). If the unit was `word`, it would instead contain 10 words. - -`split_overlap` is the amount of units that should be included from the previous Document. 
For example, if the unit is `sentence` and the length is `10`, setting `split_overlap=2` means that the last two sentences of the first document will also be present at the start of the second, which will include only 8 new sentences for a total of 10. Such repetition carries over to the end of the text to split. - -# Writing to the store - -Once all of this is done, we can finally move on to the last step of our journey: writing the Documents into our document store. We first create the document store: - -```python -from haystack.preview.document_stores import InMemoryDocumentStore - -document_store = InMemoryDocumentStore() -``` - -and then use `DocumentWriter` to actually write the documents in: - - -```python -from haystack.preview.components.writers import DocumentWriter - -writer = DocumentWriter(document_store=document_store) -writer.run(documents=documents_with_embeddings) -# returns {"documents_written": 120} -``` - -If you've read my [previous post](/posts/2023-10-27-haystack-series-rag) about RAG pipelines, you may wonder: why use `DocumentWriter` when we could call the `.write_documents()` method of our document store? - -In fact, the two methods are fully equivalent: `DocumentWriter` does nothing more than calling the `.write_documents()` method of the document store. The difference is that `DocumentWriter` is the way to go if you are using a Pipeline, which is what we're going to do next. - -# Putting it all together - -We finally have all the components we need to go from a list of web pages to a document store populated with clean and short Document objects. Let's build a Pipeline to sum up this process: - -```python -from haystack.preview import Pipeline - -document_store = InMemoryDocumentStore() - -pipeline = Pipeline() -pipeline.add_component("converter", HTMLToDocument()) -pipeline.add_component("cleaner", DocumentCleaner()) -pipeline.add_component("splitter", TextDocumentSplitter(split_by="sentence", split_length=5)) -pipeline.add_component("writer", DocumentWriter(document_store=document_store)) -pipeline.connect("converter", "cleaner") -pipeline.connect("cleaner", "splitter") -pipeline.connect("splitter", "writer") - -pipeline.draw("simple-indexing-pipeline.png") - -pipeline.run({"converter": {"sources": file_names}}) -``` - -![Indexing Pipeline](/posts/2023-11-05-haystack-series-minimal-indexing/simple-indexing-pipeline.png) - -That's it! We now have a fully functional indexing pipeline that can take a list of web pages and convert them into Documents that our RAG pipeline can use. As long as the RAG pipeline reads from the same store we are writing the Documents to, we can add as many Documents as we need to keep the chatbot's answers up to date without having to touch the RAG pipeline. 
- -To try it out, we only need to take the RAG pipeline we built in [my previous post](/posts/2023-10-27-haystack-series-rag) and connect it to the same document store we just populated: - -```python -from haystack.preview.components.generators.openai.gpt import GPTGenerator -from haystack.preview.components.builders.prompt_builder import PromptBuilder -from haystack.preview.components.retrievers.in_memory_bm25_retriever import InMemoryBM25Retriever - -template = """ -Given the following information, answer the question: {{ question }} - -{% for document in documents %} - {{ document.content }} -{% endfor %} -""" -pipe = Pipeline() - -pipe.add_component("retriever", InMemoryBM25Retriever(document_store=document_store)) -pipe.add_component("prompt_builder", PromptBuilder(template=template)) -pipe.add_component("llm", GPTGenerator(api_key=api_key)) -pipe.connect("retriever", "prompt_builder.documents") -pipe.connect("prompt_builder", "llm") - -question = "Is there any documentary about the story of Rose Island? Can you tell me something about that?" -pipe.run({ - "retriever": {"query": question}, - "prompt_builder": {"question": question} -}) - -# returns { -# 'llm': { -# 'replies': [ -# 'Yes, there is a documentary about the story of Rose Island. It is -# called "Rose Island" and was released on Netflix on 8 December 2020. -# The documentary follows the true story of Giorgio Rosa, an Italian -# engineer who built his own island in the Adriatic sea in the late -# 1960s. The island housed a restaurant, bar, souvenir shop, and even -# a post office. Rosa\'s goal was to have his self-made structure -# recognized as an independent state, leading to a battle with the -# Italian authorities. The film depicts the construction of the island -# and Rosa\'s refusal to dismantle it despite government demands. The -# story of Rose Island was relatively unknown until the release of the -# documentary. The film showcases the technology Rosa invented to build -# the island and explores themes of freedom and resilience.' -# ], -# 'metadata': [...] -# } -# } -``` - -And suddenly, our chatbot knows everything about Rose Island without us having to feed the data to the document store by hand. - -# Wrapping up - -Indexing pipelines can be powerful tools, even in their simplest form, like the one we just built. However, it doesn't end here: Haystack offers many more facilities to extend what's possible with indexing pipelines, like doing web searches, downloading files from the web, processing many other file types, and so on. - -We will see how soon, so stay tuned! 
- ---- - -*Next: [The World of Web RAG](/posts/2023-11-09-haystack-series-simple-web-rag)* - -*Previous: [RAG Pipelines from scratch](/posts/2023-10-27-haystack-series-rag)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* - - -*Cover image from [this website.](https://bertolamifineart.bidinside.com/en/lot/126352/1968-insula-de-la-rozoj-o-isola-delle-/)* diff --git a/content/posts/2023-11-09-haystack-series-simple-web-rag.md b/content/posts/2023-11-09-haystack-series-simple-web-rag.md deleted file mode 100644 index d0a9b5c9..00000000 --- a/content/posts/2023-11-09-haystack-series-simple-web-rag.md +++ /dev/null @@ -1,371 +0,0 @@ ---- -title: "The World of Web RAG" -date: 2023-11-09 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, NLP, Python, LLM, GPT, "Retrieval Augmentation", RAG, Web, indexing] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-11-09-haystack-series-simple-web-rag/cover.jpeg" ---- - -In an earlier post of the Haystack 2.0 series, we've seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. - -In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. - -{{< notice info >}} - -💡 *Do you want to see the code in action? Check out the [Colab notebook](https://colab.research.google.com/drive/1dGMPxReo730j7_zQDZOu-0SGf-pk4XDL?usp=sharing) or the [gist](https://gist.github.com/ZanSara/0907a8f3ae19f62998cc061ed6e8ce53).* - -{{< /notice >}} - -{{< notice warning >}} - -⚠️ **Warning:** *This code was tested on `haystack-ai==0.130.0`. Haystack 2.0 is still unstable, so later versions might introduce breaking changes without notice until Haystack 2.0 is officially released. The concepts and components, however, stay the same.* - -{{< /notice >}} - - -# Searching the Web - -As we've seen [earlier](/posts/2023-10-27-haystack-series-rag), a Haystack RAG Pipeline is made of three components: a Retriever, a PromptBuilder, and a Generator, and looks like this: - -![BM25 RAG Pipeline](/posts/2023-11-09-haystack-series-simple-web-rag/bm25-rag-pipeline.png) - -To make this pipeline use the Web as its data source, we need to change the retriever with a component that does not look into a local document store for information but can search the web. - -Haystack 2.0 already provides a search engine component called `SerperDevWebSearch`. It uses [SerperDev's API](https://serper.dev/) to query popular search engines and return two types of data: a list of text snippets coming from the search engine's preview boxes and a list of links, which point to the top search results. - -To begin, let's see how to use this component in isolation. - -```python -from haystack.preview.components.websearch import SerperDevWebSearch - -question = "What's the official language of the Republic of Rose Island?" - -search = SerperDevWebSearch(api_key=serperdev_api_key) -results = search.run(query=question) -# returns { -# "documents": [ -# Document(content='Esperanto', meta={'title': 'Republic of Rose Island - Wikipedia', 'link': 'https://en.wikipedia.org/wiki/Republic_of_Rose_Island'}), -# Document(content="The Republic of Rose Island was a short-lived micronation on a man-made platform in the Adriatic Sea. 
It's a story that few people knew of until recently, ...", meta={'title': 'Rose Island - The story of a micronation', 'link': 'https://www.rose-island.co/', 'imageUrl': 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQiRCfTO6OwFS32SX37S-7OadDZCNK6Fy_NZVGsci2gcIS-zcinhOcGhgU&s', 'position': 1}, -# ... -# ], -# "links": [ -# 'https://www.rose-island.co/', -# 'https://www.defactoborders.org/places/rose-island', -# ... -# ] -# } -``` - -`SerperDevWebSearch` is a component with a simple interface. Starting from its output, we can see that it returns not one but two different values in the returned dictionary: `documents` and `links`. - -`links` is the most straightforward and represents the top results that Google found relevant for the input query. It's a list of strings, each containing a URL. You can configure the number of links to return with the `top_k` init parameter. - -`documents` instead is a list of already fully formed Document objects. The content of these objects corresponds to the "answer boxes" that Google often returns together with its search results. Given that these code snippets are usually clean and short pieces of text, they're perfect to be fed directly to an LLM without further processing. - -Other than expecting an API key as an init parameter and `top_k` to control the number of results, `SerperDevWebSearch` also accepts an `allowed_domains` parameter, which lets you configure the domains Google is allowed to look into during search, and `search_params`, a more generic dictionary input that lets you pass any additional search parameter SerperDev's API understand. - -# A Minimal Web RAG Pipeline - -`SerperDevWebSearch` is actually the bare minimum we need to be able to build our very first Web RAG Pipeline. All we need to do is replace our original example's Retriever with our search component. - -This is the result: - -```python -from haystack.preview import Pipeline -from haystack.preview.components.builders import PromptBuilder -from haystack.preview.components.generators import GPTGenerator - -template = """ -Question: {{ question }} - -Google Search Answer Boxes: -{% for document in documents %} - {{ document.content }} -{% endfor %} - -Please reformulate the information above to -answer the user's question. -""" -pipe = Pipeline() - -pipe.add_component("search", SerperDevWebSearch(api_key=serperdev_api_key)) -pipe.add_component("prompt_builder", PromptBuilder(template=template)) -pipe.add_component("llm", GPTGenerator(api_key=api_key)) -pipe.connect("search.documents", "prompt_builder.documents") -pipe.connect("prompt_builder", "llm") - -question = "What's the official language of the Republic of Rose Island?" -pipe.run({ - "search": {"query": question}, - "prompt_builder": {"question": question} -}) -# returns { -# 'llm': { -# 'replies': [ -# "The official language of the Republic of Rose Island is Esperanto. This artificial language was chosen by the residents of Rose Island as their national language when they declared independence in 1968. However, it's important to note that despite having their own language, government, currency, and postal service, Rose Island was never officially recognized as an independent nation by any country." -# ], -# 'metadata': [...] 
-# } -# } -``` - -![Minimal Web RAG Pipeline](/posts/2023-11-09-haystack-series-simple-web-rag/minimal-web-rag-pipeline.png) - -This solution is already quite effective for simple questions because Google does most of the heavy lifting of reading the content of the top results, extracting the relevant snippets, and packaging them up in a way that is really easy to access and understand by the model. - -However, there are situations in which this approach is not sufficient. For example, for highly technical or nuanced questions, the answer box does not provide enough context for the LLM to elaborate and grasp the entire scope of the discussion. In these situations, we may need to turn to the second output of `SerperDevWebSearch`: the links. - -# Fetching URLs - -Haystack offers components to read the content of a URL: it's `LinkContentFetcher`. Let's see this component in action. - -```python -from haystack.preview.components.fetchers.link_content import LinkContentFetcher - -fetcher = LinkContentFetcher() -fetcher.run(urls=["https://en.wikipedia.org/wiki/Republic_of_Rose_Island"]) -# returns { -# "streams": [ -# ByteStream(data=b"\n<...") -# ] -# } -``` - -First, let's notice that `LinkContentFetcher` outputs a list of `ByteStream` objects. `ByteStream` is a Haystack abstraction that makes handling binary streams and files equally easy. When a component produces `ByteStream` as output, you can directly pass these objects to a Converter component that can extract its textual content without saving such binary content to a file. - -These features come in handy to connect `LinkContentFetcher` to a component we've already met before: `HTMLToDocument`. - -# Processing the page - -In a [previous post](/posts/2023-11-05-haystack-series-minimal-indexing), we've seen how Haystack can convert web pages into clean Documents ready to be stored in a Document Store. We will reuse many of the components we have discussed there, so if you missed it, make sure to check it out. - -From the pipeline in question, we're interested in three of its components: `HTMLToDocument`, `DocumentCleaner`, and `DocumentSplitter`. Once the search component returns the links and `LinkContentFetcher` downloaded their content, we can connect it to `HTMLToDocument` to extract the text and `DocumentCleaner` and `DocumentSplitter` to clean and chunk the content, respectively. These documents then can go to the `PromptBuilder`, resulting in a pipeline such as this: - -```python -template = """ -Question: {{ question }} - -Context: -{% for document in documents %} - {{ document.content }} -{% endfor %} - -Please reformulate the information above to answer the user's question. -""" -pipe = Pipeline() - -pipe.add_component("search", SerperDevWebSearch(api_key=serperdev_api_key)) -pipe.add_component("fetcher", LinkContentFetcher()) -pipe.add_component("converter", HTMLToDocument()) -pipe.add_component("cleaner", DocumentCleaner()) -pipe.add_component("splitter", DocumentSplitter(split_by="sentence", split_length=3)) -pipe.add_component("prompt_builder", PromptBuilder(template=template)) -pipe.add_component("llm", GPTGenerator(api_key=api_key)) -pipe.connect("search.links", "fetcher") -pipe.connect("fetcher", "converter") -pipe.connect("converter", "cleaner") -pipe.connect("cleaner", "splitter") -pipe.connect("splitter", "prompt_builder.documents") -pipe.connect("prompt_builder", "llm") - -question = "What's the official language of the Republic of Rose Island?" 
-pipe.run({ - "search": {"query": question}, - "prompt_builder": {"question": question} -}) -``` - -![Incorrect Web RAG Pipeline](/posts/2023-11-09-haystack-series-simple-web-rag/incorrect-web-rag-pipeline.png) - -However, running this pipeline results in a crash. - -``` -PipelineRuntimeError: llm raised 'InvalidRequestError: This model's maximum context -length is 4097 tokens. However, your messages resulted in 4911 tokens. Please reduce -the length of the messages.' -``` - -Reading the error message reveals the issue right away: the LLM received too much text. And that's to be expected because we just passed the entire content of several web pages to it. - -We need to find a way to filter only the most relevant documents from the long list that is generated by `DocumentSplitter`. - -# Ranking Documents on the fly - -Retrievers are optimized to use the efficient retrieval engines of document stores to sift quickly through vast collections of Documents. However, Haystack also provides smaller, standalone components that work very well on shorter lists and don't require a full-blown vector database engine to function. - -These components are called rankers. One example of such a component is `TransformersSimilarityRanker`: a ranker that uses a model from the `transformers` library to rank Documents by their similarity to a given query. - -Let's see how it works: - -```python -from haystack.preview.components.rankers.transformers_similarity import TransformersSimilarityRanker - -ranker = TransformersSimilarityRanker() -ranker.warm_up() -ranker.run( - query="What's the official language of the Republic of Rose Island?", - documents=documents, - top_k=1 - ) -# returns { -# 'documents': [ -# Document(content="Island under construction\nRepublic of Rose Island\nThe Republic of Rose Island ( Esperanto : Respubliko de la Insulo de la Rozoj; Italian : Repubblica dell'Isola delle Rose) was a short-lived micronation on a man-made platform in the Adriatic Sea , 11 kilometres (6.8\xa0mi) off the coast of the province of Rimini , Italy, built by Italian engineer Giorgio Rosa, who made himself its president and declared it an independent state on 1 May 1968. [1] [2] Rose Island had its own government, currency, post office, and commercial establishments, and the official language was Esperanto .", meta={'source_id': '03bfe5f7b7a7ec623e854d2bc5eb36ba3cdf06e1e2771b3a529eeb7e669431b6'}, score=7.594357490539551) -# ] -# } -``` - -This component has a feature we haven't encountered before: the `warm_up()` method. - -Components that need to initialize heavy resources, such as a language model, always perform this operation after initializing them in the `warm_up()` method. When they are used in a Pipeline, `Pipeline.run()` takes care of calling `warm_up()` on all components before running; when used standalone, users need to call `warm_up()` explicitly to prepare the object to run. - -`TransformersSimilarityRanker` accepts a few parameters. When initialized, it accepts a `model_name_or_path` with the HuggingFace ID of the model to use for ranking: this value defaults to `cross-encoder/ms-marco-MiniLM-L-6-v2`. It also takes `token`, to allow users to download private models from the Models Hub, `device`, to let them leverage PyTorch's ability to select the hardware to run on, and `top_k`, the maximum number of documents to return. `top_k`, as we see above, can also be passed to `run()`, and the latter overcomes the former if both are set. This value defaults to 10. 
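- -Putting those parameters together, initialization and usage might look like the sketch below. The values are examples only; the model name is simply the default spelled out explicitly. - -```python -from haystack.preview.components.rankers.transformers_similarity import TransformersSimilarityRanker - -# Example values: spell out the default model and set a top_k at init time. -ranker = TransformersSimilarityRanker(model_name_or_path="cross-encoder/ms-marco-MiniLM-L-6-v2", top_k=5) -ranker.warm_up() - -# A top_k passed to run() overrides the value given at init. -ranker.run(query="What's the official language of the Republic of Rose Island?", documents=documents, top_k=1) -```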
- -Let's also put this component in the pipeline: its place is between the splitter and the prompt builder. - -```python -template = """ -Question: {{ question }} - -Context: -{% for document in documents %} - {{ document.content }} -{% endfor %} - -Please reformulate the information above to answer the user's question. -""" -pipe = Pipeline() - -pipe.add_component("search", SerperDevWebSearch(api_key=serperdev_api_key)) -pipe.add_component("fetcher", LinkContentFetcher()) -pipe.add_component("converter", HTMLToDocument()) -pipe.add_component("cleaner", DocumentCleaner()) -pipe.add_component("splitter", DocumentSplitter(split_by="sentence", split_length=3)) -pipe.add_component("ranker", TransformersSimilarityRanker()) -pipe.add_component("prompt_builder", PromptBuilder(template=template)) -pipe.add_component("llm", GPTGenerator(api_key=api_key)) -pipe.connect("search.links", "fetcher") -pipe.connect("fetcher", "converter") -pipe.connect("converter", "cleaner") -pipe.connect("cleaner", "splitter") -pipe.connect("splitter", "ranker") -pipe.connect("ranker", "prompt_builder.documents") -pipe.connect("prompt_builder", "llm") - -question = "What's the official language of the Republic of Rose Island?" - -pipe.run({ - "search": {"query": question}, - "ranker": {"query": question}, - "prompt_builder": {"question": question} -}) -# returns { -# 'llm': { -# 'replies': [ -# 'The official language of the Republic of Rose Island was Esperanto.' -# ], -# 'metadata': [...] -# } -# } -``` - -![Unfiltered Web RAG Pipeline](/posts/2023-11-09-haystack-series-simple-web-rag/unfiltered-web-rag-pipeline.png) - - -Note how the ranker needs to know the question to compare the documents, just like the search and prompt builder components do. So, we need to pass the value to the pipeline's `run()` call. - -# Filtering file types - -The pipeline we just built works great in most cases. However, it may occasionally fail if the search component happens to return some URL that does not point to a web page but, for example, directly to a video, a PDF, or a PPTX. - -Haystack does offer some facilities to deal with these file types, but we will see these converters in another post. For now, let's only filter those links out to prevent `HTMLToDocument` from crashing. - -This task could be approached with Haystack in several ways, but the simplest in this scenario is to use a component that would typically be used for a slightly different purpose. This component is called `FileTypeRouter`. - -`FileTypeRouter` is designed to route different files to their appropriate converters by checking their mime type. It does so by inspecting the content or the extension of the files it receives in input and producing an output dictionary with a separate list for each identified type. - -However, we can also conveniently use this component as a filter. Let's see how! - -```python -from haystack.preview.components.routers.file_type_router import FileTypeRouter - -router = FileTypeRouter(mime_types=["text/html"]) -router.run(sources=["Republic_of_Rose_Island.txt", "Republic_of_Rose_Island.html"]) -# returns defaultdict(list, -# {'unclassified': [PosixPath('Republic_of_Rose_Island.txt')], -# 'text/html': [PosixPath('Republic_of_Rose_Island.html')]}) -``` - -`FileTypeRouter` must always be initialized with the list of mime types it is supposed to handle. Not only that, but this component can also deal with files that do not match any of the expected mime types by putting them all under the `unclassified` category. 
- -By putting this component between `LinkContentFetcher` and `HTMLToDocument`, we can make it forward along the pipeline only the files that match the `text/html` mime type and silently discard all others. - -Notice how, in the pipeline below, I explicitly connect the `text/html` output only: - -```python -template = """ -Question: {{ question }} - -Google Search Answer Boxes: -{% for document in documents %} - {{ document.content }} -{% endfor %} - -Please reformulate the information above to answer the user's question. -""" -pipe = Pipeline() - -pipe.add_component("search", SerperDevWebSearch(api_key=serperdev_api_key)) -pipe.add_component("fetcher", LinkContentFetcher()) -pipe.add_component("filter", FileTypeRouter(mime_types=["text/html"])) -pipe.add_component("converter", HTMLToDocument()) -pipe.add_component("cleaner", DocumentCleaner()) -pipe.add_component("splitter", DocumentSplitter(split_by="sentence", split_length=3)) -pipe.add_component("ranker", TransformersSimilarityRanker()) -pipe.add_component("prompt_builder", PromptBuilder(template=template)) -pipe.add_component("llm", GPTGenerator(api_key=api_key)) -pipe.connect("search.links", "fetcher") -pipe.connect("fetcher", "filter") -pipe.connect("filter.text/html", "converter") -pipe.connect("converter", "cleaner") -pipe.connect("cleaner", "splitter") -pipe.connect("splitter", "ranker") -pipe.connect("ranker", "prompt_builder.documents") -pipe.connect("prompt_builder", "llm") - -question = "What's the official language of the Republic of Rose Island?" - -pipe.run({ - "search": {"query": question}, - "ranker": {"query": question}, - "prompt_builder": {"question": question} -}) -# returns { -# 'llm': { -# 'replies': [ -# 'The official language of the Republic of Rose Island was Esperanto.' -# ], -# 'metadata': [...] -# } -# } -``` - -![HTML-only Web RAG Pipeline](/posts/2023-11-09-haystack-series-simple-web-rag/html-web-rag-pipeline.png) - -With this last addition, we added quite a bit of robustness to our pipeline, making it less likely to fail. - -# Wrapping up - -Web RAG is a use case that can be expanded to cover many use cases, resulting in very complex pipelines. Haystack helps make sense of their complexity by pipeline graphs and detailed error messages in case of mismatch connections. However, pipelines this large can become overwhelming, especially when more branches are added. - -In one of our next posts, we will see how to cover such use cases while keeping the resulting complexity as low as possible. 
- ---- - -*Next: Soon!* - -*Previous: [Indexing data for RAG applications](/posts/2023-11-05-haystack-series-minimal-indexing)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* - -*Cover image from [Wikipedia](https://commons.wikimedia.org/wiki/File:Isola_delle_Rose_1968.jpg)* diff --git a/content/posts/drafts/2023-11-xx-haystack-chatbots-with-script.md b/content/posts/drafts/2023-11-xx-haystack-chatbots-with-script.md deleted file mode 100644 index 7a455149..00000000 --- a/content/posts/drafts/2023-11-xx-haystack-chatbots-with-script.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "[DRAFT] Unusual Haystack: chatbots with a script" -date: 2023-11-xx -author: "ZanSara" -tags: [Haystack, "Haystack 2.0", "Canals", "Chatbots", "DAG", LLMs] -featuredImage: "/posts/2023-11-xx-haystack-chatbots-with-script/cover.png" -draft: true ---- - -*Cover image by [DALL-E 3](https://openai.com/dall-e-3)* - -When we think about chatbots, we normally imagine them as assistants that react to our requests, like customer support bots or personal assistants. But what if the roles were inverted? Can LLM-based chatbots take an active role in the conversation with a human and drive the discussion like an interviewer would? - -In this post I am going to show you how to use Haystack in a rather unconventional way to build elaborate chatbots that don't simply react to the user's queries, but follow a script to get the answers out of the user in a structured form. - -# The Context: Restaurant Booking - -Let's imagine we are building a chatbot for a famous restaurant, one that is constantly overbooked. At such places, tables can normally be reserved online with a simple form, but this restaurant also wants to offer the same functionality through a chatbot. - -The bot has to collect some key information from the user: the time and date of the reservation and how many people to book for. It also has to ask about any food restrictions and potentially reschedule if the menu of the selected day is not suitable for the guests. It must obtain the name and phone number of the reference person, and make sure all the guests are informed that there is a strict dresscode at the place and that they must respect it to be allowed inside. For the sake of the example, let's imagine the restaurant has two locations, one of which also has outdoor seating during the summer, while at the other there may be occasional TV crews, as the restaurant hosts regular cooking competitions. And let's not forget that the guests may have special requests that should also be recorded by the bot. - -The process is far from straightforward, and as good as current LLMs are at handling context, we can't always rely on them ticking all the boxes in a free-form conversation, especially if the chat itself becomes confusing. We need to make sure that the bot is always on top of the conversation. - -# A Tree of Questions - -One task at which Haystack 2.0 really shines is building complex pipelines, and one way to interpret a pipeline's graph in the context of a chatbot application is to see it as a decision tree, where each node is a unit of information that the bot should get from the user before moving on.
- -For example, we can try to summarize the requirements above in a script that may look like this: - -![graph TD; -START --> A -B --> HUMAN -D --> END -A["Identify the restaurant and\nask for reason of the call"] --"something else"--> B["Route to human assistant"] -A --"make reservation"--> F["Ask for the number of guests"] -F --"number is unknown"--> D["Close the call specifying\nno reservation was made"] -F --"number is given" --> C["List availability and ask for the desired\nlocation, time and date"] -C --"no match"--> D -C --"location B" --> H["Ask whether they are ok\nwith TV crews filming them"] -C --"location A\nin summer" --> G["Ask whether they prefer\nindoor or outdoor seating"] -H --"not ok with filming" --> D -C --"location A\nin winter"--> E["Ask for food restrictions\nrelated to the place/date selected"] -G --"indoor seating" --> E -G --"outdoor seating" --> E -H --"ok with filming" --> E -E --"there are\nproblems"--> C -E --"no problems" --> I["Ask whether they are ok\nwith the dresscode"] -I --"dresscode not ok" --> D -I --"dresscode ok"--> J["Ask for nameand phone number\nfor the reservation"] -J --"they can't give contact info" --> D -J --"contact info provided" --> K["Summarize reservation details\nand end the chat"] -K --> END](/posts/2023-11-xx-haystack-chatbots-with-script/conversation-tree.png) - -Unlike old-shool chatbots, each step of the above tree may not be a single question. The bot may need to iterate with the user a few times in order to get the required information fully. For example, asking the users whether they want to sit indoors or outdoors at location A might go as such: - -> Bot: Location A offers indoor and outdoor seating. There's availability for both right now. What would be your preference? -> -> User: We prefer outside if it's not too cold. Is it cold there at that time of the night? -> -> Bot: Average temperatures for 8 PM in mid April range between 18C and 23C, so it's already possible to dine outdoors in good weather. -> -> User: What happens in case of rain? -> -> Bot: If rain is forecast for the evening, a few guests will be moved indoors and the remaining are going to be rescheduled for another date at our best availability. -> -> User: We will stay indoors then. - -Worse yet, users may initially give a reply, and then later on in the conversation change their mind: - -> Bot: So it's Location A, indoor seating. Do any of you have any food restrictions we should be aware of? -> -> User: No, none has. I'm sorry, can we change the time of the reservation? One of my friends can't make it by 8 PM. If we could move it to 9 PM that would be fantastic. -> -> Bot: I'm sorry, at 9 PM we only have availability at Location B. Is that fine for you? -> -> User: Is it at Location B where they have the TV crews? -> -> Bot: Yes, at Location B we have a video crew every evening in April. -> -> User: That doesn't work for us. Can we select another day? - -And so on. While the script for the bot is straigthforward, the user may jump through the conversation in unexpected ways, and the bot needs to be able to handle all that in a graceful way without forgetting to collect any piece of information. - -# Query the user "step-by-step" - -One of the most important features of the tree above is that the wider objective of "making a reservation" can be broken down into a tree of subtasks, which we can call "conversation steps". 
At every step the objective is quite clear: the bot has to extract one specific bit of information from the user and should not consider it done until the user has given such information. - -This implies that at each conversation step we have a loop: -- The bot asks for the information. -- The user replies. -- The bot needs to assess whether the user's reply contains the required information. -- If it does, move onto the next step. -- If it does not, check whether the user changed topic of if they asked for clarifications - - If they asked for clarifications, continue the conversation until they give the answer - - If they changed topic, jump to the conversation step that handles such topic. - -From this breakdown we already realize that there are two main components in this pipeline: one that handles the conversation steps, and one that manages sudden changes of topic. We can call them `ConversationStep` and `TopicIdentifier`. - -Let's try to design them! - -# Conversation Step - - - diff --git a/content/posts/drafts/2023-11-xx-haystack-series-embedding-retrieval.md b/content/posts/drafts/2023-11-xx-haystack-series-embedding-retrieval.md deleted file mode 100644 index a1a2fa98..00000000 --- a/content/posts/drafts/2023-11-xx-haystack-series-embedding-retrieval.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: "[DRAFT] Embedding Retrieval" -date: 2023-11-xx -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, NLP, Python, LLM, "Retrieval Augmentation", RAG, "indexing", "Document Store", Embeddings] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-11-xx-haystack-series-minimal-indexing/cover.png" -draft: true -# canonicalUrl: https://haystack.deepset.ai/blog/.... ---- -*[The Republic of Rose Island, Wikipedia](https://it.wikipedia.org/wiki/Isola_delle_Rose)* - -In the previous post of the Haystack 2.0 series we've seen how to build RAG pipelines using a generator, a prompt builder and a retriever with its document store. However, the content of our document store wasn't exactly extensive, and populating one with clean, properly formatted data may seem like a daunting task. - -In this post I will show you how to use Haystack 2.0 to populate a document store that you can then use for retrieval. - -{{< notice info >}} - -💡 *Do you want to see the code in action? Check out the [Colab notebook](https://drive.google.com/file/d/1cM1M61VBIWcIkulCpM9uTdObid2Vj47G/view?usp=sharing) or the [gist](#).* - -{{< /notice >}} - -{{< notice warning >}} - -⚠️ **Warning:** *This code was tested on `haystack-ai==0.117.0`. Haystack 2.0 is still unstable, so later versions might introduce breaking changes without notice until Haystack 2.0 is officially released. The concepts and components however stay the same.* - -{{< /notice >}} - - -# The task - -In Haystack's terminology, the process of extracting information from a group of files and properly store it into a document store is called "indexing". The process includes, at the very minimum, reading the content of a file and generate a Document object from it, and then store this Document into a document store. So you will need: - -- A file converter -- A document writer - -This is the very bare minimum set you need in a pipeline that does indexing. - -However, common indexing pipelines include more than two components. They should be able to process more than one file type, for example .txt, .pdf, .docx, .html, if not audio and video files, or images, so we need one component for each of these types. 
Having many file types to convert, we need a routing component that sends each file to the proper converter based in their type. Consider also that files tend to contain way more text than a normal LLM can chew, so we will need to split those huge Documents into smaller chunks. Also, the converters may not always do a perfect job, so we may need to clean the data from artifacts such as page numbers, headers and footers, and so on. On top of all of this, if you plan to use a retriever which is based on embedding similarity, you will also need to embed all documents before writing them into the store. - -So you'll end up with a list that looks more like this: - -- A file type router -- Several file converters -- A document cleaner -- A document splitter -- A document embedder -- A document writer - -Sounds like a lot of work! - -In this post we will focus on the preprocessing part of the pipeline, so on cleaning, splitting, embedding and writing of documents. Later I am going to make another post focusing on all the converters that Haystack offers and how to build more "multimodal" indexing Pipelines. - -# Converting files - -One of the most important tasks of this pipeline is to convert files into Documents. Haystack provides several converters for this task: at the time of writing, it supports: - -- Raw text files (`TextFileToDocument`) -- HTML files, so web pages in general (`HTMLToDocument`) -- PDF files, by extracting text natively (`PyPDFToDocument`) -- Image files, PDFs with images and Office files with images, by OCR (`AzureOCRDocumentConverter`) -- Audio files, doing transcription with Whisper either locally (`LocalWhisperTranscriber`) or remotely using OpenAI's hosted models (`RemoteWhisperTranscriber`) -- A ton of [other formats](https://tika.apache.org/2.9.1/formats.html), such as Microsoft's Office formats, thanks to [Apache Tika](https://tika.apache.org/) (`TikaDocumentConverter`) - -In this post we are going to use only webpages, so our converter of choice is `HTMLToDocument`. - -`HTMLToDocument` is a converter that understands HTML and to extract only meaningful text from it, filtering all the markup away. Keep in mind that this is a file converter, not a URL fetcher: it can only process local files, such as a website crawl. Haystack provides some components to fetch webpages, but we are going to see them in a later post. - -Here is how you use this converter on a local file: - -```python -from haystack.preview.components.file_converters.html import HTMLToDocument - -path = "Republic_of_Rose_Island.html" - -converter = HTMLToDocument() -converter.run(sources=[path]) - -# returns {"documents": [Document(text="The Republic of Rose Isla...")]} -``` - -`HTMLToDocument` is a very simple component that offers close to no parameters to customize its behavior. One interesting feature is the input types it accepts: it can take paths to local files in the form of strings or `Path` objects, but it also accepts `ByteStream` objects. - -`ByteStream` is a handy Haystack abstraction that makes handling binary streams easier. So components that are retrieving large files from the Internet, or otherwise producing them on the fly, can "pipe" them directly into this component without saving the data to disk first. - -# Cleaning the text - -We've seen how to take whole web pages and convert them into large Document objects. 
The converters normally do a good job, but rarely a perfect one, so Haystack offers a component called `DocumentCleaner` that can help remove some noise from the text of the resulting documents. - -Just like any other component, `DocumentCleaner` is rather straightforward to use. - -```python -from haystack.preview.components.preprocessors.document_cleaner import DocumentCleaner - -cleaner = DocumentCleaner() -cleaner.run(documents=documents) -# returns {"documents": [Document(text=...), Document(text=...), ...]} -``` - -The effectiveness of `DocumentCleaner` depends a lot on the type of converter you use. Some flags, such as `remove_empty_lines` and `remove_extra_whitespace`, are small fixes which can come in handy but normally have little impact on the quality of the results when used in a RAG pipeline. They can, however, make a vast difference for Extractive QA. - -Other parameters, like `remove_substrings` or `remove_regex`, work very well but need manual inspection and iteration from a human to get right. For example, for Wikipedia pages we could use them to remove all instances of the word `"Wikipedia"`, which are undoubtedly many and irrelevant. - -Finally, `remove_repeated_substrings` is a convenient method that removes headers and footers from long text, for example books and articles, but in fact it works only for PDFs and to a limited degree for text files, because it relies on the presence of form feed characters (`\f`), which are rarely present in web pages. - -# Splitting the text - -Now that the text is cleaned up, we can move on to a more interesting process: text splitting. - -So far, each Document stored the content of an entire file. If a file was a whole book with hundreds of pages, a single Document would contain hundreds of thousands of words, which is clearly too much for an LLM to make sense of (for now). Such a large Document is also very hard for Retrievers to understand, because it contains so much text that it ends up looking relevant for every possible question. To populate our document store with data that can be used effectively by a RAG pipeline, we need to chunk this data into much smaller Documents. - -That's where `TextDocumentSplitter` comes into play. - -{{< notice info >}} - -💡 *With LLMs in a race to offer the [largest context window](https://magic.dev/blog/ltm-1) and research showing that such a chase is [counterproductive](https://arxiv.org/abs/2307.03172), there is no general consensus about how splitting Documents for RAG impacts the LLM's performance.* - -*What you need to keep in mind is that splitting implies a tradeoff. Huge documents are always going to be a bit relevant for every question, but they will bring a lot of context, which may or may not confuse the model. On the other hand, very small Documents are much more likely to be retrieved only for questions they're highly relevant for, but they might provide too little context for the LLM to really understand their meaning.* - -*Tweaking the size of your Documents for the specific LLM you're using and the domain of your application is one way to optimize your RAG pipeline, so be ready to experiment with different Document sizes before committing to one.* - -{{< /notice >}} - -How is it used?
- -```python -from haystack.preview.components.preprocessors.text_document_splitter import TextDocumentSplitter - -text_splitter = TextDocumentSplitter(split_by="sentence", split_length=5) -text_splitter.run(documents=documents) - -# returns {"documents": [Document(text=...), Document(text=...), ...]} -``` - -`TextDocumentSplitter` lets you configure the approximate size of the chunks you want to generate with three parameters: `split_by`, `split_length` and `split_overlap`. - -`split_by` defines the unit to use when splitting some text. For now the options are `word`, `sentence` and `passage` (paragraph), but we will soon add other options, such as tokens. - -`split_length` is the number of the units defined above that each document should include. For example, if the unit is `sentence`, `split_length=10` means that all your Documents will contain 10 sentences' worth of text (except usually for the last document, which will contain fewer). If the unit was `word`, it would instead contain 10 words. - -`split_overlap` is the number of units that should be included from the previous Document. For example, if the unit is `sentence` and the length is `10`, setting `split_overlap=2` means that the last two sentences of the first document will also be present at the start of the second, which will include only 8 new sentences for a total of 10. Such repetition carries over to the end of the text to split. - -# Compute text embeddings - -Now that we have a set of clean, short Documents, we're almost ready to store them. At this stage there's an optional step we can take: computing the embeddings for each Document. - -In my previous post about RAG pipelines I purposefully used a BM25 Retriever, which does not use embeddings. However, BM25 is far from the most effective or failsafe retrieval method: embedding-based retrieval, in fact, regularly outperforms it in nearly every scenario. - -There are some downsides to using embeddings for retrieval: -- Computing embedding similarity is normally more expensive than computing a BM25 score, -- Both the query and all the Documents need to be converted into embeddings before the calculations can take place, -- Every model architecture has its own embedding type, which also requires a dedicated tokenizer. - -So, even if it is more powerful, using embeddings for retrieval does require some extra work. - -If you decide to use this retrieval style, you will need to compute embeddings for all your Documents before storing them. Such a calculation should be done right before writing the Documents into the store, and it makes use of a component called an Embedder. - -Right now Haystack offers two types of Embedders for Documents: one for OpenAI models and one for SentenceTransformers models. We are going to use the OpenAI one for now, which is called `OpenAIDocumentEmbedder`. - -Let's see how it is used: - -```python -from haystack.preview.components.embedders.openai_document_embedder import OpenAIDocumentEmbedder - -embedder = OpenAIDocumentEmbedder(api_key=api_key) -embedder.run(documents=split_documents) - -# returns {"documents": [Document(text=...), Document(text=...), ...]} -``` -After running the embedder, the list of Documents may look unchanged. However, after passing through this component, all Documents will have their `embedding` field populated with a large vector, which can then be used by the document store for retrieval. - -`OpenAIDocumentEmbedder` offers some parameters to let you customize the generation of your embeddings.
To begin, you can choose the `model_name`: the default is `text-embedding-ada-002`. You can then pass the OpenAI `organization` keyword if that's necessary to authenticate. - -Two parameters, `prefix` and `suffix`, do pretty much what their names imply: they add a fixed prefix or suffix to the document's content right before embedding it. This may be useful in some situations to make the embedding more meaningful to the LLM, but it strongly depends on your use case. - -`metadata_fields_to_embed` provides something even more powerful: it appends the content of the document's metadata to the text. Often the metadata of a document contains information that the text is missing but that is crucial to understanding its content. For example, the metadata can contain the date the Document was created, giving the LLM a more precise sense of time. Or, if the Document is a snippet of text from a legal document, it may be talking about "the Company", the "First Party", and so on, but the definition of such terms is not in the paragraph itself: if the metadata contains keys such as `{"title": "Certificate of Incorporation - XYZ Inc."}`, it allows the LLM to frame the content of the Document much more precisely. In short: the richer the metadata, the more useful it is to embed it together with the Document's content. - -Finally, `embedding_separator` is a small fine-tuning parameter that sets which character to use to separate the metadata fields to embed when they are appended to the Document's content. You normally don't need to use it. - -# Writing to the store - -Once all of this is done, we can finally move on to the last step of our journey: writing the Documents into our document store. Luckily, this process is really simple. We first create the document store: - -```python -from haystack.preview.document_stores import InMemoryDocumentStore - -document_store = InMemoryDocumentStore() -``` - -and then use `DocumentWriter` to actually write the documents in. - - -```python -from haystack.preview.components.writers import DocumentWriter - -writer = DocumentWriter(document_store=document_store) -writer.run(documents=documents_with_embeddings) -``` - -If you've seen my [previous post](/posts/2023-10-27-haystack-series-rag) about RAG Pipelines, you may wonder: why use `DocumentWriter` when we could just call the `.write_documents()` method of our document store? - -In fact, the two methods are fully equivalent: `DocumentWriter` does nothing more than call the `.write_documents()` method of the document store. The difference is that `DocumentWriter` is the way to go if you are using a Pipeline, which is exactly what we're going to do next. - -# Putting it all together - -We finally have all the components we need to go from a heterogeneous list of files to a document store populated with clean, short, searchable Document objects.
Let's build a Pipeline to sum up this entire process: - -```python -from haystack.preview import Pipeline - -document_store = InMemoryDocumentStore() - -pipeline = Pipeline() -pipeline.add_component("converter", HTMLToDocument()) -pipeline.add_component("cleaner", DocumentCleaner()) -pipeline.add_component("splitter", TextDocumentSplitter(split_by="sentence", split_length=5)) -pipeline.add_component("embedder", OpenAIDocumentEmbedder(api_key=api_key)) -pipeline.add_component("writer", DocumentWriter(document_store=document_store)) -pipeline.connect("converter", "cleaner") -pipeline.connect("cleaner", "splitter") -pipeline.connect("splitter", "embedder") -pipeline.connect("embedder", "writer") - -pipeline.draw("simple-indexing-pipeline.png") - -pipeline.run({"converter": {"sources": file_names}}) -``` - -![Indexing Pipeline](/posts/2023-11-xx-haystack-series-minimal-indexing/simple-indexing-pipeline.png) - -That's it! We now have a fully functional indexing pipeline that can take a web page and convert them into Documents that our RAG pipeline can use. As long as the RAG pipeline reads from the store we are writing the Documents too, we can add as many Documents as we need to keep the chatbot's answers up to date without having to touch the RAG pipeline at all. - -However, it doesn't end here. This pipeline is very simple: Haystack offers many more facilities to extend what's possible with indexing pipelines much further, like doing web searches, downloading files from the web, processing many other file types, and so on. We will see how soon, so make sure to check out the next posts. - ---- - -*Next: Soon!* - -*Previous: [RAG Pipelines from scratch](/posts/2023-10-27-haystack-series-rag)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* \ No newline at end of file diff --git a/content/posts/drafts/2023-11-xx-haystack-series-extended-indexing.md b/content/posts/drafts/2023-11-xx-haystack-series-extended-indexing.md deleted file mode 100644 index 2544517c..00000000 --- a/content/posts/drafts/2023-11-xx-haystack-series-extended-indexing.md +++ /dev/null @@ -1,386 +0,0 @@ ---- -title: "[DRAFT] Indexing any type of file" -date: 2023-11-xx -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, NLP, Python, LLM, "Retrieval Augmentation", RAG, "indexing", "Document Store", Embeddings] -series: ["Haystack 2.0 Series"] -featuredImage: "/posts/2023-11-xx-haystack-series-extended-indexing/cover.png" -draft: true -# canonicalUrl: https://haystack.deepset.ai/blog/indexing-data-for-rag-applications ---- -*[The Republic of Rose Island, Wikipedia](https://it.wikipedia.org/wiki/Isola_delle_Rose)* - - -In the previous post of the Haystack 2.0 series we've seen how to build RAG pipelines using a generator, a prompt builder and a retriever with its document store. However, the content of our document store wasn't exactly extensive, and populating one with clean, properly formatted data may seem like a daunting task. - -In this post I will show you a few ways to use Haystack 2.0 to populate a document store that you can then use for retrieval. - -{{< notice info >}} - -💡 *Do you want to see the code in action? Check out the [Colab notebook](https://colab.research.google.com/drive/1gmdQem6f0RBYBb0HeBDPZwbb7_JU3-Us?usp=sharing) or the [gist](#).* - -{{< /notice >}} - -{{< notice warning >}} - -⚠️ **Warning:** *This code was tested on `haystack-ai==0.105.0`. 
Haystack 2.0 is still unstable, so later versions might introduce breaking changes without notice until Haystack 2.0 is officially released. The concepts and components however stay the same.* - -{{< /notice >}} - - -# The task - -In Haystack's terminology, the process of extracting information from a group of files and properly store it into a document store is called "indexing". The process includes, at the very minimum, reading the content of a file and generate a Document object from it, and then store this Document into a document store. So you will need: - -- A file converter -- A document writer - -This is the very bare minimum set you need in a pipeline that does indexing. - -However, common indexing pipelines include more than two components. They should be able to process more than one file type, for example .txt, .pdf, .docx, .html, if not audio and video files, or images, so we need one component for each of these types. Having many file types to convert, we need a routing component that sends each file to the proper converter based in their type. Consider also that files tend to contain way more text than a normal LLM can chew, so we will need to split those huge Documents into smaller chunks. Also, the converters may not always do a perfect job, so we may need to clean the data from artifacts such as page numbers, headers and footers, and so on. On top of all of this, if you plan to use a retriever which is based on embedding similarity, you will also need to embed all documents before writing them into the store. - -So you'll end up with a list that looks more like this: - -- A file type router -- Several file converters -- A document cleaner -- A document splitter -- A document embedder -- A document writer - -Sounds like a lot of work! Let's explore this pipeline one component at a time. - -# Converting files - -One of the most important tasks of this pipeline is to convert files into Documents. Haystack provides several converters for this task: at the time of writing, it supports: - -- Raw text files (`TextFileToDocument`) -- HTML files, so web pages in general (`HTMLToDocument`) -- PDF files, by extracting text natively (`PyPDFToDocument`) -- Image files, PDFs with images and Office files with images, by OCR (`AzureOCRDocumentConverter`) -- Audio files, doing transcription with Whisper either locally (`LocalWhisperTranscriber`) or remotely using OpenAI's hosted models (`RemoteWhisperTranscriber`) -- A ton of [other formats](https://tika.apache.org/2.9.1/formats.html), such as Microsoft's Office formats, thanks to [Apache Tika](https://tika.apache.org/) (`TikaDocumentConverter`) - -For the sake of making the example easy to run on Colab, I'll skip Tika, Azure and the local Whisper component. We are left with four converters for text files, web pages, PDFs and audio files. Let's check them out! - -## Text files - -`TextFileToDocument` is a rather basic converter that reads the content of a text file and dumps it into a Document object. It's perfect for raw text files, code files, and for now it's also very handy for Markdown and other human-readable markup formats, like Wikipedia dumps. We are [already working](https://github.com/deepset-ai/haystack/pull/6159) to introduce dedicated converters to extract more information from markup formats, but `TextFileToDocument` will always be a quick-and-dirty option as well. - -Here is how you can use it to convert some files. 
- -```python -from haystack.preview.components.file_converters.txt import TextFileToDocument - -path = "Republic_of_Rose_Island.txt" - -converter = TextFileToDocument() -converter.run(paths=[path]) - -# returns {"documents": [Document(text="The '''Republic of Rose Isla...")]} -``` - -Note that for each input path you will get one Document out. As we passed the path to a single file, `TextFileToDocument` produced a single, large Document as a result. - -The behavior of `TextFileToDocument` can be customized to support different use cases. For example, you can select the text encoding to expect (such as `utf-8`, `latin-1`, and so on) through the `encoding` parameter, but you can also make it filter out text in unexpected languages using the `valid_languages` parameter, which uses `langdetect` under the hood. - -Another advanced feature is the removal of numerical lines: `remove_numeric_tables` can be set to `True` to make the converter try to spot numerical tables and automatically remove them from the text, while `numeric_row_threshold` sets the maximum percentage of numerical characters that can be present in a line before it's considered to be a numerical table and filtered out. This cleanup step is disabled by default. - -## Web pages - -`HTMLToDocument` is a converter that understands HTML and extracts only meaningful text from it, filtering all the markup away. Keep in mind that this is a file converter, not a URL fetcher: it can only process local files, such as a website crawl. Haystack provides some components to fetch webpages, but we are going to see them in a later post. - -Here is how you use this converter on a local file. - -```python -from haystack.preview.components.file_converters.html import HTMLToDocument - -path = "Republic_of_Rose_Island.html" - -converter = HTMLToDocument() -converter.run(sources=[path]) - -# returns {"documents": [Document(text="The Republic of Rose Isla...")]} -``` - -`HTMLToDocument` is even simpler than the text converter, and for now offers close to no parameters to customize its behavior. One interesting feature, though, is the input types it accepts: it can take paths to local files in the form of strings or `Path` objects, but it also accepts `ByteStream` objects. - -`ByteStream` is a handy Haystack abstraction that makes handling binary streams easier. Components that retrieve large files from the Internet, or otherwise produce them on the fly, can "pipe" them directly into this component without saving the data to disk first. - -## PDFs - -`PyPDFToDocument`, as the name implies, uses the `pypdf` Python library to extract text from PDF files. Note that pypdf can extract everything that is stored as text in the PDF, but it cannot recognise text stored in pictures. For that you need an OCR-capable converter, such as `AzureOCRDocumentConverter`, which needs an Azure API key. - -Let's convert some files with `PyPDFToDocument`: - -```python -from haystack.preview.components.file_converters.pypdf import PyPDFToDocument - -path = "Republic_of_Rose_Island.pdf" - -converter = PyPDFToDocument() -pdf_documents = converter.run(sources=[path]) - -# returns {"documents": [Document(text="The Republic of Rose Isla...")]} -``` - -`PyPDFToDocument` is also very simple and offers no parameters to customize its behavior for now. Just like `HTMLToDocument`, it accepts strings, paths, and `ByteStream` objects as input, which will come in handy when we see how to retrieve documents from the Internet.
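For example, here is a minimal sketch of how a `ByteStream` could be fed to the same converter. The exact location and constructor of `ByteStream` may differ between Haystack preview versions, so treat this as an illustration rather than a reference:

```python
from haystack.preview.dataclasses import ByteStream
from haystack.preview.components.file_converters.pypdf import PyPDFToDocument

# Pretend these bytes arrived from the network instead of a local file
# (here we just read them from disk to keep the sketch self-contained).
with open("Republic_of_Rose_Island.pdf", "rb") as pdf_file:
    stream = ByteStream(data=pdf_file.read())

converter = PyPDFToDocument()
converter.run(sources=[stream])
# returns {"documents": [Document(text="The Republic of Rose Isla...")]}
```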
- -## Audio files - -Last but not least, Haystack provides transcriber components based on [Whisper](https://openai.com/research/whisper) that can be used to convert audio files into text Documents. Whisper is open source, but it is also available as an API from OpenAI: as a consequence, there are two transcribers available: - -- `LocalWhisperTranscriber`: it downloads the selected Whisper model from HuggingFace and performs the transcription locally. -- `RemoteWhisperTranscriber`: it uses the OpenAI API to run inference remotely, which may be faster, but requires an API key. - -For the sake of this example we will use the remote transcriber, but the local one is nearly identical. Here is how you use them: - -```python -from haystack.preview.components.audio.whisper_remote import RemoteWhisperTranscriber - -path = "/content/Republic_of_Rose_Island.mp3" - -converter = RemoteWhisperTranscriber(api_key=api_key) -converter.run(audio_files=[path]) - -# returns {"documents": [Document(text="The Republic of Rose Isla...")]} -``` - -This converter lets you pass any parameter that the Whisper model understands as additional keyword arguments, so if the API changes, the component will keep working. Other than that, it offers no specific parameters. - - -# Routing files - -Now that we're familiar with the simple API offered by the file converters, let's step back: given a list of files, how do we send each one to the correct converter? - -This step is especially important for Pipelines, so Haystack offers a small component that is fit for the purpose: `FileTypeRouter`. This component routes files by MIME type, and expects a list of all the possible MIME types the pipeline can handle at init time. - -Given that our pipeline can handle text files, HTML files, PDFs and audio files, the supported MIME types in our case are `["text/plain", "text/html", "audio/mpeg", "application/pdf"]`. If you don't know which MIME types match your file types, check out [this list](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types) of the most common types, which in turn links to the [official spec](https://www.iana.org/assignments/media-types/media-types.xhtml). To be really accurate we could add other text and audio types to the list and route all of them to `TextFileToDocument` and `RemoteWhisperTranscriber`, but for this example this is going to be sufficient. - -Here is how it's used: - -```python -from haystack.preview.components.routers.file_type_router import FileTypeRouter - -router = FileTypeRouter( - mime_types=["text/plain", "text/html", "audio/mpeg", "application/pdf"] -) -router.run( - sources=[ - "Republic_of_Rose_Island.txt", - "Republic_of_Rose_Island.html", - "Republic_of_Rose_Island.pdf", - "Republic_of_Rose_Island.mp3" - ] -) -# returns { -# 'text/plain': [PosixPath('Republic_of_Rose_Island.txt')], -# 'text/html': [PosixPath('Republic_of_Rose_Island.html')], -# 'application/pdf': [PosixPath('Republic_of_Rose_Island.pdf')], -# 'audio/mpeg': [PosixPath('Republic_of_Rose_Island.mp3')], -# } -``` -Note how, just like the HTML and PDF converters, `FileTypeRouter` also expects `sources` as input, meaning that it knows how to deal with `ByteStream` objects too. - -However, what happens if we add a file of a MIME type that is not included in the list above, or a path to a file that doesn't exist at all?
- -```python -router.run( - sources=[ - "Republic_of_Rose_Island.png", - "Republic_of_Rose_Island.mp3", - "I_do_not_even_exist_and_I_have_no_extension" - ] -) -# returns { -# 'audio/mpeg': [PosixPath('Republic_of_Rose_Island.mp3')], -# 'unclassified': [ -# PosixPath('Republic_of_Rose_Island.png'), -# PosixPath('I_do_not_even_exist_and_I_have_no_extension') -# ], -# } -``` - -This is one powerful feature of `FileTypeRouter`: it can not only route each file to its own converter, but it can also filter out files that we have no use for. - - -# Cleaning the text - -We now have a way to classify the files by type and to convert them all into a bunch of large Document objects. The converters normally do a good job, but it's rarely perfect, so Haystack offers a component called `DocumentCleaner` that can help remove some noise from the text of the resulting documents. - -Just like any other component, `DocumentCleaner` is rather straightforward to use. - -```python -from haystack.preview.components.preprocessors.text_document_cleaner import DocumentCleaner - -cleaner = DocumentCleaner() -cleaner.run(documents=documents) -# returns {"documents": [Document(text=...), Document(text=...), ...]} -``` - -The effectiveness of `DocumentCleaner` depends a lot on the type of converter you use. Some flags, such as `remove_empty_lines` and `remove_extra_whitespace`, are small fixes that can come in handy, but normally have little impact on the quality of the results when used in a RAG pipeline. They can, however, make a vast difference for Extractive QA. - -Other parameters, like `remove_substrings` or `remove_regex`, work very well but need manual inspection and iteration from a human to get right. For example, for Wikipedia pages we could use them to remove all instances of the word `"Wikipedia"`, which are undoubtedly many and irrelevant. - -Finally, `remove_repeated_substrings` is a convenient method that removes headers and footers from long text, for example books and articles, but in fact it works only for PDFs and to a limited degree for text files, because it relies on the presence of form feed characters (`\f`). - -# Splitting the text - -Now that the text is cleaned up, we can move on to a more interesting process: text splitting. - -So far, each Document stored the content of an entire file. If a file was a whole book with hundreds of pages, a single Document would contain hundreds of thousands of words, which is clearly too much for an LLM to make sense of (for now). Such a large Document is also very hard for Retrievers to understand, because it contains so much text that it ends up looking relevant for every possible question. To populate our document store with data that can be used effectively by a RAG pipeline, we need to chunk this data into much smaller Documents. - -That's where `TextDocumentSplitter` comes into play. - -{{< notice info >}} - -💡 *With LLMs in a race to offer the [largest context window](https://magic.dev/blog/ltm-1) and research showing that such a chase is [counterproductive](https://arxiv.org/abs/2307.03172), there is no general consensus about how splitting Documents for RAG impacts the LLM's performance.* - -*What you need to keep in mind is that splitting implies a tradeoff. Huge documents are always going to be somewhat relevant for every question, but they will bring a lot of context which may, or may not, confuse the model.
On the other hand, very small Documents are much more likely to be retrieved only for questions they're highly relevant for, but they might provide too little context for the LLM to really understand their meaning.* - -*Tweaking the size of your Documents for the specific LLM you're using and the domain of your application is one way to optimize your RAG pipeline, so be ready to experiment with different Document sizes before committing to one.* - -{{< /notice >}} - -How is it used? - -```python -from haystack.preview.components.preprocessors.text_document_splitter import TextDocumentSplitter - -text_splitter = TextDocumentSplitter(split_by="sentence", split_length=5) -text_splitter.run(documents=documents) - -# returns {"documents": [Document(text=...), Document(text=...), ...]} -``` - -`TextDocumentSplitter` lets you configure the approximate size of the chunks you want to generate with three parameters: `split_by`, `split_length` and `split_overlap`. - -`split_by` defines the unit to use when splitting the text. For now the options are `word`, `sentence` and `passage` (paragraph), but we will soon add other options, such as tokens. - -`split_length` is the number of units (as defined above) that each Document should include. For example, if the unit is `sentence`, `split_length=10` means that all your Documents will contain 10 sentences' worth of text (except usually the last one, which may contain fewer). If the unit was `word`, each Document would instead contain 10 words. - -`split_overlap` is the number of units that should be repeated from the previous Document. For example, if the unit is `sentence` and the length is `10`, setting `split_overlap=2` means that the last two sentences of the first Document will also be present at the start of the second, which will include only 8 new sentences for a total of 10. Such repetition carries over to the end of the text to split. - -# Compute text embeddings - -Now that we have a set of clean, short Documents, we're almost ready to store them. At this stage there's an optional step that we can take, which is to compute the embeddings for each Document. - -In my previous post about RAG pipelines I used (purposefully) a BM25 Retriever, which does not use embeddings. However, BM25 is far from being the most effective or failsafe retrieval method: embedding-based retrieval, in fact, regularly outperforms it in nearly every scenario. - -There are some downsides to using embeddings for retrieval: -- Computing embedding similarity is normally more expensive than computing a BM25 score, -- Both the query and all the Documents need to be converted into embeddings before the calculations can take place, -- Every model architecture has its own embedding type, which also requires a dedicated tokenizer. - -So, even if it is more powerful, using embeddings for retrieval does require some extra work. - -If you decide to use this retrieval style, you will need to compute embeddings for all your Documents before storing them. Such a calculation should be done right before writing the Documents into the store, and makes use of a component called an Embedder. - -Right now Haystack offers two types of Embedders for Documents: one for OpenAI models and one for SentenceTransformers models. We are going to use the OpenAI one for now, which is called `OpenAIDocumentEmbedder`.
- -Let's see how it is used: - -```python -from haystack.preview.components.embedders.openai_document_embedder import OpenAIDocumentEmbedder - -embedder = OpenAIDocumentEmbedder(api_key=api_key) -embedder.run(documents=split_documents) - -# returns {"documents": [Document(text=...), Document(text=...), ...]} -``` -After running the embedder, the list of Documents may look unchanged. However, after passing through this component, all Documents will have their `embedding` field populated with a large vector, which can then be used by the document store for retrieval. - -`OpenAIDocumentEmbedder` offers some parameters to let you customize the generation of your embeddings. To begin with, you can choose the `model_name`: the default is `text-embedding-ada-002`. You can then pass the OpenAI `organization` keyword if that's necessary to authenticate. - -Two parameters, `prefix` and `suffix`, do pretty much what the name implies: they add a fixed prefix or suffix to the document's content right before embedding it. It may be useful in some conditions to make the embedding more meaningful to the LLM, but it strongly depends on your use case. - -`metadata_fields_to_embed` provides something even more powerful: it appends the content of the document's metadata to the text. Often the metadata of a document contains information that the text is missing, but that is crucial to understand its content. For example, the metadata can contain the date the Document was created, giving the LLM a more precise sense of time. Or, if the Document is a snippet of text from a legal document, it may be talking about "the Company", the "First Party", and so on, but the definition of such terms is not in the paragraph itself: if the metadata contains keys such as `{"title": "Certificate of Incorporation - XYZ Inc."}`, it allows the LLM to frame the content of the Document much more precisely. In short: the richer the metadata, the more useful it is to embed it together with the Document's content. - -Finally, `embedding_separator` is a small fine-tuning parameter that sets which character is used to separate the metadata fields to embed when they are appended to the Document's content. You normally don't need to use it. - -# Writing to the store - -Once all of this is done, we can finally move on to the last step of our journey: writing the Documents into our document store. Luckily, this process is really simple. We first create the document store: - -```python -from haystack.preview.document_stores import InMemoryDocumentStore - -document_store = InMemoryDocumentStore() -``` - -and then use `DocumentWriter` to actually write the documents in. - - -```python -from haystack.preview.components.writers import DocumentWriter - -writer = DocumentWriter(document_store=document_store) -writer.run(documents=documents_with_embeddings) -``` - -If you've seen my [previous post](/posts/2023-10-27-haystack-series-rag) about RAG Pipelines you may wonder: why use `DocumentWriter` when we could just call the `.write_documents()` method of our document store? - -In fact, the two approaches are fully equivalent: `DocumentWriter` does nothing more than call the `.write_documents()` method of the document store. The difference is that `DocumentWriter` is the way to go if you are using a Pipeline, which is exactly what we're going to do next. - -# Putting it all together - -We finally have all the components we need to go from a heterogeneous list of files to a document store populated with clean, short, searchable Document objects.
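Before wiring everything into a single Pipeline, here is a quick sketch of what the metadata embedding described above could look like in isolation. The document text and metadata are made up for illustration, and I'm assuming the Document dataclass accepts a `metadata` dict as shown, so adapt the field names to your own data and Haystack version:

```python
from haystack.preview import Document
from haystack.preview.components.embedders.openai_document_embedder import OpenAIDocumentEmbedder

# A Document whose metadata carries information that the text alone is missing.
document = Document(
    text="The First Party grants the Company an irrevocable license...",
    metadata={"title": "Certificate of Incorporation - XYZ Inc."},
)

# Append the "title" metadata field to the content before computing the embedding.
embedder = OpenAIDocumentEmbedder(
    api_key=api_key,
    metadata_fields_to_embed=["title"],
    embedding_separator="\n",
)
embedder.run(documents=[document])
```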
Let's build a Pipeline to sum up this entire process. Note how the output of every converter flows into a single "join" component, which merges all the Documents into one list before passing them on to the cleaner: - -```python -from haystack.preview import Pipeline - -document_store = InMemoryDocumentStore() - -pipeline = Pipeline() -pipeline.add_component("router", FileTypeRouter(mime_types=["text/plain", "text/html", "application/pdf", "audio/mpeg"])) -pipeline.add_component("text_converter", TextFileToDocument()) -pipeline.add_component("html_converter", HTMLToDocument()) -pipeline.add_component("pdf_converter", PyPDFToDocument()) -pipeline.add_component("mp3_converter", RemoteWhisperTranscriber(api_key=api_key)) -pipeline.add_component("join", DocumentsJoiner()) -pipeline.add_component("cleaner", DocumentCleaner()) -pipeline.add_component("splitter", TextDocumentSplitter(split_by="sentence", split_length=5)) -pipeline.add_component("embedder", OpenAIDocumentEmbedder(api_key=api_key)) -pipeline.add_component("writer", DocumentWriter(document_store=document_store)) -pipeline.connect("router.text/plain", "text_converter") -pipeline.connect("router.text/html", "html_converter") -pipeline.connect("router.application/pdf", "pdf_converter") -pipeline.connect("router.audio/mpeg", "mp3_converter") - -pipeline.connect("text_converter", "join.text") -pipeline.connect("html_converter", "join.html") -pipeline.connect("pdf_converter", "join.pdf") -pipeline.connect("mp3_converter", "join.mp3") - -pipeline.connect("join", "cleaner") -pipeline.connect("cleaner", "splitter") -pipeline.connect("splitter", "embedder") -pipeline.connect("embedder", "writer") - -pipeline.run({ - "router": { - "sources": [ - "Republic_of_Rose_Island.txt", - "Republic_of_Rose_Island.pdf", - "Republic_of_Rose_Island.html", - "Republic_of_Rose_Island.mp3", - ] - } -}) -``` - -![Indexing Pipeline](/posts/2023-11-xx-haystack-series-minimal-indexing/indexing-pipeline.png) - -. - -. - -. - ---- - -*Next: Soon!* - -*Previous: [RAG Pipelines from scratch](/posts/2023-10-27-haystack-series-rag)* - -*See the entire series here: [Haystack 2.0 series](/series/haystack-2.0-series/)* \ No newline at end of file diff --git a/content/projects/booking-system.md b/content/projects/booking-system.md deleted file mode 100644 index 46be3ce3..00000000 --- a/content/projects/booking-system.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: "CAI Sovico's Website" -description: Small website and reservations management system -date: 2016-01-01 -author: "ZanSara" -tags: [PHP, "PHP 8", Hiking, Web, JavaScript, HTML, CSS, MySQL, CAI] -featuredImage: "/projects/camerini.png" ---- - -Main website: https://www.caisovico.it - ---- - -Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, [Rifugio M. Del Grande - R. Camerini](https://maps.app.goo.gl/PwdVC82VHwdPZJDE6). I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. - -The website went through several iterations as web technologies evolved, as well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system built on a PHP 8 / MySQL backend with a vanilla JS frontend. It also includes an FTP server that supports a couple of [ZanzoCams](/projects/zanzocam/) and a [weather monitoring station](http://www.meteoproject.it/ftp/stazioni/caisovico/).
\ No newline at end of file diff --git a/content/projects/zanzocam.md b/content/projects/zanzocam.md deleted file mode 100644 index 8eb0cb81..00000000 --- a/content/projects/zanzocam.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "ZanzoCam" -description: Remote camera for autonomous operation in isolated locations, based on Raspberry Pi. -date: 2020-01-01 -author: "ZanSara" -tags: [ZanzoCam, Python, Hiking, Web, CAI, "Raspberry Pi"] -featuredImage: "/projects/zanzocam.png" ---- - -Main website: https://zanzocam.github.io/ - ---- - -ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for [CAI Lombardia](https://www.cai.it/gruppo_regionale/gr-lombardia/) by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices at their affiliate huts. - -ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. The camera software also improves on the basic capabilities of picamera to take pictures in dark conditions, making ZanzoCams able to shoot good pictures for a few hours after sunset. - -The camera is highly configurable: photo size and frequency, server address and protocol, all the overlays (color, size, position, text and images) and several other parameters can be configured remotely without the need to expose any ports of the device to the internet. They work reliably without the need for a VPN and at the same time are quite secure by design. - -ZanzoCams mostly serve CAI and the hut managers for self-promotion, and help hikers and climbers assess the local conditions before attempting a hike. Pictures taken for this purpose are sent to [RifugiLombardia](https://www.rifugi.lombardia.it/), and you can see many of them [at this page](https://www.rifugi.lombardia.it/territorio-lombardo/webcam). - -However, ZanzoCams have also been used by glaciologists to monitor glacier conditions, outlook and extension over the years. [Here you can see their webcams](https://www.servizioglaciologicolombardo.it/webcam-3), some of which are ZanzoCams. - -Here is the latest picture from [Rifugio M. Del Grande - R. Camerini](https://maps.app.goo.gl/PwdVC82VHwdPZJDE6), the test location for the original prototype: - -![ZanzoCam of Rifugio M. Del Grande - R. Camerini](https://webcam.rifugi.lombardia.it/rifugio/00003157/pictures/image__0.jpg) - -And here is one of the cameras serving a local glaciology research group, [Servizio Glaciologico Lombardo](https://www.servizioglaciologicolombardo.it/): - -![ZanzoCam of M. Disgrazia](https://webcam.rifugi.lombardia.it/rifugio/90003157/pictures/image__0.jpg) - -Both of these cameras are fully solar-powered. - -ZanzoCam is fully open-source: check the [GitHub repo](https://github.com/ZanzoCam?view_as=public). Thanks to this decision to open-source the project, I was invited by [Università di Pavia](https://portale.unipv.it/it) to hold a lecture about the project as part of their ["Hardware and Software Codesign"](http://hsw2021.gnudd.com/) course. Check out the slides of the lecture [here](talks/zanzocam-pavia/).
\ No newline at end of file diff --git a/content/publications/msc-thesis.md b/content/publications/msc-thesis.md deleted file mode 100644 index a05bf9ac..00000000 --- a/content/publications/msc-thesis.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: "Evaluation of Qt as GUI Framework for Accelerator Controls" -date: 2018-12-20 -author: "ZanSara" -tags: [CERN, Physics, Python, GUI, Qt, PyQt, JavaFX] -featuredImage: "/publications/msc-thesis.png" ---- - -This is the full-text of my MSc thesis, written in collaboration with -[Politecnico di Milano](https://www.polimi.it/) and [CERN](https://home.cern/). - ---- - -Get the full text here: [Evaluation of Qt as GUI Framework for Accelerator Controls](/publications/msc-thesis.pdf) - -Publisher's entry: [10589/144860](https://hdl.handle.net/10589/144860). \ No newline at end of file diff --git a/content/publications/thpv014.md b/content/publications/thpv014.md deleted file mode 100644 index a4066d24..00000000 --- a/content/publications/thpv014.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "Adopting PyQt For Beam Instrumentation GUI Development At CERN" -date: 2022-03-01 -author: "ZanSara" -tags: [CERN, Physics, Python, GUI, Qt, PyQt, JavaFX] -featuredImage: "/publications/thpv014.png" ---- - -## Abstract - -As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. To conclude, the challenges we anticipate for the BI GUI developer community in adopting this new technology are also discussed. - ---- - -Get the full text here: [Adopting PyQt For Beam Instrumentation GUI Development At CERN](/publications/thpv014.pdf) - -Get the poster: [PDF](/publications/thpv014-poster.pdf) - -Publisher's entry: [THPV014](https://accelconf.web.cern.ch/icalepcs2021/doi/JACoW-ICALEPCS2021-THPV014.html) \ No newline at end of file diff --git a/content/publications/thpv042.md b/content/publications/thpv042.md deleted file mode 100644 index 1b43b36f..00000000 --- a/content/publications/thpv042.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF)" -date: 2021-12-11 -author: "ZanSara" -tags: [CERN, Physics, Python, "Data Science"] -featuredImage: "/publications/thpv042.png" ---- - -## Abstract - -The CERN accelerators require a large number of instruments, measuring different beam parameters like position, losses, current etc. The instruments’ associated electronics and software also produce information about their status. All these data are stored in a database for later analysis. The Beam Instrumentation group developed the Offline Analysis Framework some years ago to regularly and systematically analyze these data. The framework has been successfully used for nearly 100 different analyses that ran regularly by the end of the LHC run 2. 
Currently it is being updated for run 3 with modern and efficient tools to improve its usability and data analysis power. In particular, the architecture has been reviewed to have a modular design to facilitate the maintenance and the future evolution of the tool. A new web based application is being developed to facilitate the users’ access both to online configuration and to results. This paper will describe all these evolutions and outline possible lines of work for further improvements. - ---- - -Get the full text here: [Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF)](/publications/thpv042.pdf) - -Publisher's entry: [THPV042](https://accelconf.web.cern.ch/icalepcs2021/doi/JACoW-ICALEPCS2021-THPV042.html). diff --git a/content/publications/tucpr03.md b/content/publications/tucpr03.md deleted file mode 100644 index e4c8bf8f..00000000 --- a/content/publications/tucpr03.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs" -date: 2020-08-30 -author: "ZanSara" -tags: [CERN, Physics, Python, GUI, Qt, PyQt, JavaFX] -featuredImage: "/publications/tucpr03.png" ---- - -## Abstract - -For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. This was a wakeup call for us. We took the opportunity to reconsider all technical options for developing operational GUIs. Our options ranged from sticking with JavaFX, over using the Qt framework (either using PyQt or developing our own Java Bindings to Qt), to using Web technology both in a browser and in native desktop applications. This article explains the reasons for moving away from Java as the main GUI technology and describes the analysis and hands-on evaluations that we went through before choosing the replacement. - ---- - -Get the full text here: [Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs](/publications/tucpr03.pdf) - -Publisher's entry: [TUCPR03](https://accelconf.web.cern.ch/icalepcs2019/doi/JACoW-ICALEPCS2019-TUCPR03.html). diff --git a/content/talks/2021-05-24-zanzocam-pavia.md b/content/talks/2021-05-24-zanzocam-pavia.md deleted file mode 100644 index 9223f1cd..00000000 --- a/content/talks/2021-05-24-zanzocam-pavia.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "ZanzoCam: An open-source alpine web camera" -date: 2021-05-24 -author: "ZanSara" -tags: [ZanzoCam, Python, Hiking, Web, CAI, "Raspberry Pi"] -featuredImage: "/talks/2021-05-24-zanzocam-pavia.png" ---- - -Slides: [ZanzoCam: An open-source alpine web camera](/talks/2021-05-24-zanzocam-pavia.pdf) - ---- - -On May 24th 2021 I held a talk about the [ZanzoCam project](https://zanzocam.github.io/en) -as invited speaker for the ["Hardware and Software Codesign"](http://hsw2021.gnudd.com/) course at -[Università di Pavia](https://portale.unipv.it/it). 
The slides go through the entire lifecycle of the [ZanzoCam project](https://zanzocam.github.io/en), -from its very inception, through the market research, our decision process, and earlier prototypes, and -then go into a more detailed explanation of the design and implementation of the project from -a hardware and software perspective, with some notes about our financial situation and project management. diff --git a/content/talks/2022-12-01-open-nlp-meetup.md b/content/talks/2022-12-01-open-nlp-meetup.md deleted file mode 100644 index e634c0ea..00000000 --- a/content/talks/2022-12-01-open-nlp-meetup.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: "A Practical Introduction to Image Retrieval" -date: 2022-12-01 -author: "ZanSara" -tags: [Haystack, LLM, NLP, Python, AI, Retrieval, Images, Colab, "Multi Modality", "Text to Image", "OpenNLP Meetup"] -featuredImage: "/talks/2022-12-01-open-nlp-meetup.png" ---- - -Youtube: [Open NLP meetup #7](https://www.youtube.com/watch?v=7Idjl3OR0FY) - -Slides: [A Practical Introduction to Image Retrieval](https://gist.github.com/ZanSara/dc4b22e7ffe2a56647e0afba7537c46b) - -Colab: [MultiModalRetriever - Live coding](https://gist.github.com/ZanSara/9e8557830cc866fcf43a2c5623688c74) - -All the material can also be found [here](https://drive.google.com/drive/folders/1_3b8PsvykHeM0jSHsMUWQ-4h_VADutcX?usp=drive_link). - ---- - -## A Practical Introduction to Image Retrieval - -*by Sara Zanzottera from deepset* - -Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. Text-to-text generation models like GPT now have their counterparts in text-to-image models, like Stable Diffusion. But what about search? In this talk we're going to experiment with CLIP, a text-to-image search model, to look for animals matching specific characteristics in a dataset of pictures. Does CLIP know which one is "The fastest animal in the world"? - ---- - -For the 7th [OpenNLP meetup](https://www.meetup.com/open-nlp-meetup/) I presented the topic of Image Retrieval, a feature that I've recently added to Haystack in the form of a [MultiModal Retriever](https://docs.haystack.deepset.ai/docs/retriever#multimodal-retrieval) (see the [Tutorial](https://haystack.deepset.ai/tutorials/19_text_to_image_search_pipeline_with_multimodal_retriever)). - -The talk consists of 5 parts: - -- An introduction to the topic of Image Retrieval -- A mention of the current SOTA model (CLIP) -- An overview of Haystack -- A step-by-step description of how image retrieval applications can be implemented with Haystack -- A live coding session where I start from a blank Colab notebook and build a fully working image retrieval system from the ground up, to the point where I can run queries live. - -Towards the end I briefly mention an even more advanced version of this image retrieval system, which I had no time to implement live. However, I later built a notebook implementing such a system, and you can find it here: [Cheetah.ipynb](https://gist.github.com/ZanSara/31ed3fc8252bb74b1952f2d0fe253ed0) - -The slides were generated from the linked Jupyter notebook with `jupyter nbconvert Dec_1st_OpenNLP_Meetup.ipynb --to slides --post serve`.
diff --git a/content/talks/2023-08-03-office-hours-haystack-2.0-status.md b/content/talks/2023-08-03-office-hours-haystack-2.0-status.md deleted file mode 100644 index 215544bf..00000000 --- a/content/talks/2023-08-03-office-hours-haystack-2.0-status.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Office Hours: Haystack 2.0" -date: 2023-08-03 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, LLM, NLP, Python, AI, "Office Hours"] -featuredImage: "/talks/2023-08-03-office-hours-haystack-2.0-status.png" ---- - -Recording: [Haystack v2 - Office Hours](https://drive.google.com/file/d/1PyAlvJ22Z6o1bls07Do5kx2WMTdotsM7/view?usp=drive_link) - -Slides: [Haystack v2 - Status Update](https://drive.google.com/file/d/1QFNisUk2HzwRL_27bpr338maxLvDBr9D/preview) - -All the material can also be found [here](https://drive.google.com/drive/folders/1zmXwxsSgqDgvYf2ptjHocdtzOroqaudw?usp=drive_link). - ---- - -In this [Office Hours](https://discord.com/invite/VBpFzsgRVF) I presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise about the presence of a `preview` module in the latest Haystack 1.x releases, we took the opportunity to share this early draft of the project to collect early feedback. - -Haystack 2.0 is a total rewrite that rethinks many of the core concepts of the framework and makes LLM support its primary concern, while making sure to support all the use cases its predecessor enabled. The rewrite addresses some well-known, old issues about the pipeline's design, the relationship between the pipeline, its components, and the document stores, and aims at drastically improving the developer experience and the framework's extensibility. - -As the main designer of this rewrite, I walked the community through a slightly re-hashed version of the slide deck I had presented internally just a few days earlier in an All Hands on the same topic. \ No newline at end of file diff --git a/content/talks/2023-10-12-office-hours-rag-pipelines.md b/content/talks/2023-10-12-office-hours-rag-pipelines.md deleted file mode 100644 index 28ca6697..00000000 --- a/content/talks/2023-10-12-office-hours-rag-pipelines.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Office Hours: RAG Pipelines" -date: 2023-10-12 -author: "ZanSara" -tags: ["Haystack 2.0", Haystack, LLM, NLP, Python, AI, RAG, "Office Hours"] -featuredImage: "/talks/2023-10-12-office-hours-rag-pipelines.png" ---- - -Recording: [Office Hours - RAG Pipelines](https://drive.google.com/file/d/1UXGi4raiCQmrxOfOexL-Qh0CVbtiSm89/view?usp=drive_link) - -Notebook: [RAG_Pipelines.ipynb](https://gist.github.com/ZanSara/5975901eea972c126f8e1c2341686dfb) - -All the material can also be found [here](https://drive.google.com/drive/folders/17CIfoy6c4INs0O_X6YCa3CYXkjRvWm7X?usp=drive_link). - ---- - -In this [Office Hours](https://discord.com/invite/VBpFzsgRVF) I walk through the LLM support offered by Haystack 2.0 to date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. - -In under 40 minutes we start from a simple query to ChatGPT and build up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies.
- -The talk indirectly shows also how Pipelines can help users compose these systems quickly, to visualize them, and helps them connect together different parts by producing verbose error messages. \ No newline at end of file diff --git a/css/coder-dark.min.444027263620296e25d16bc7d683df544ee56a148dafabbd903f464d823a1248.css b/css/coder-dark.min.444027263620296e25d16bc7d683df544ee56a148dafabbd903f464d823a1248.css new file mode 100644 index 00000000..10207950 --- /dev/null +++ b/css/coder-dark.min.444027263620296e25d16bc7d683df544ee56a148dafabbd903f464d823a1248.css @@ -0,0 +1 @@ +@font-face{font-family:baskerville;src:url(/fonts/Libre_Baskerville/LibreBaskerville-Regular.ttf)format("truetype");font-weight:"regular";font-style:"regular"}@font-face{font-family:baskerville-bold;src:url(/fonts/Libre_Baskerville/LibreBaskerville-Bold.ttf)format("truetype");font-weight:"bold";font-style:"bold"}@font-face{font-family:baskerville-italic;src:url(/fonts/Libre_Baskerville/LibreBaskerville-Italic.ttf)format("truetype");font-weight:"regular";font-style:"regular"}body.colorscheme-dark{color:#dadada;background-color:#212121}body.colorscheme-dark a{color:#42a5f5}body.colorscheme-dark h1,body.colorscheme-dark h2,body.colorscheme-dark h3,body.colorscheme-dark h4,body.colorscheme-dark h5,body.colorscheme-dark h6{color:#dadada}body.colorscheme-dark h1:hover .heading-link,body.colorscheme-dark h2:hover .heading-link,body.colorscheme-dark h3:hover .heading-link,body.colorscheme-dark h4:hover .heading-link,body.colorscheme-dark h5:hover .heading-link,body.colorscheme-dark h6:hover .heading-link{visibility:visible}body.colorscheme-dark h1 .heading-link,body.colorscheme-dark h2 .heading-link,body.colorscheme-dark h3 .heading-link,body.colorscheme-dark h4 .heading-link,body.colorscheme-dark h5 .heading-link,body.colorscheme-dark h6 .heading-link{color:#42a5f5;font-weight:inherit;text-decoration:none;font-size:80%;visibility:hidden}body.colorscheme-dark h1 .title-link,body.colorscheme-dark h2 .title-link,body.colorscheme-dark h3 .title-link,body.colorscheme-dark h4 .title-link,body.colorscheme-dark h5 .title-link,body.colorscheme-dark h6 .title-link{color:inherit;font-weight:inherit;text-decoration:none}body.colorscheme-dark pre code{background-color:inherit;color:inherit}body.colorscheme-dark code{background-color:#4f4f4f;color:#dadada}body.colorscheme-dark blockquote{border-left:2px solid #424242}body.colorscheme-dark th,body.colorscheme-dark td{padding:1.6rem}body.colorscheme-dark table{border-collapse:collapse}body.colorscheme-dark table td,body.colorscheme-dark table th{border:2px solid #dadada}body.colorscheme-dark table tr:first-child th{border-top:0}body.colorscheme-dark table tr:last-child td{border-bottom:0}body.colorscheme-dark table tr td:first-child,body.colorscheme-dark table tr th:first-child{border-left:0}body.colorscheme-dark table tr td:last-child,body.colorscheme-dark table tr th:last-child{border-right:0}@media(prefers-color-scheme:dark){body.colorscheme-auto{color:#dadada;background-color:#212121}body.colorscheme-auto a{color:#42a5f5}body.colorscheme-auto h1,body.colorscheme-auto h2,body.colorscheme-auto h3,body.colorscheme-auto h4,body.colorscheme-auto h5,body.colorscheme-auto h6{color:#dadada}body.colorscheme-auto h1:hover .heading-link,body.colorscheme-auto h2:hover .heading-link,body.colorscheme-auto h3:hover .heading-link,body.colorscheme-auto h4:hover .heading-link,body.colorscheme-auto h5:hover .heading-link,body.colorscheme-auto h6:hover 
.heading-link{visibility:visible}body.colorscheme-auto h1 .heading-link,body.colorscheme-auto h2 .heading-link,body.colorscheme-auto h3 .heading-link,body.colorscheme-auto h4 .heading-link,body.colorscheme-auto h5 .heading-link,body.colorscheme-auto h6 .heading-link{color:#42a5f5;font-weight:inherit;text-decoration:none;font-size:80%;visibility:hidden}body.colorscheme-auto h1 .title-link,body.colorscheme-auto h2 .title-link,body.colorscheme-auto h3 .title-link,body.colorscheme-auto h4 .title-link,body.colorscheme-auto h5 .title-link,body.colorscheme-auto h6 .title-link{color:inherit;font-weight:inherit;text-decoration:none}body.colorscheme-auto pre code{background-color:inherit;color:inherit}body.colorscheme-auto code{background-color:#4f4f4f;color:#dadada}body.colorscheme-auto blockquote{border-left:2px solid #424242}body.colorscheme-auto th,body.colorscheme-auto td{padding:1.6rem}body.colorscheme-auto table{border-collapse:collapse}body.colorscheme-auto table td,body.colorscheme-auto table th{border:2px solid #dadada}body.colorscheme-auto table tr:first-child th{border-top:0}body.colorscheme-auto table tr:last-child td{border-bottom:0}body.colorscheme-auto table tr td:first-child,body.colorscheme-auto table tr th:first-child{border-left:0}body.colorscheme-auto table tr td:last-child,body.colorscheme-auto table tr th:last-child{border-right:0}}body.colorscheme-dark .content .post .tags .tag{background-color:#424242}body.colorscheme-dark .content .post .tags .tag a{color:#dadada}body.colorscheme-dark .content .post .tags .tag a:active{color:#dadada}body.colorscheme-dark .content .list ul li .title{color:#dadada}body.colorscheme-dark .content .list ul li .title:hover,body.colorscheme-dark .content .list ul li .title:focus{color:#42a5f5}body.colorscheme-dark .content .centered .about ul li a{color:#dadada}body.colorscheme-dark .content .centered .about ul li a:hover,body.colorscheme-dark .content .centered .about ul li a:focus{color:#42a5f5}@media(prefers-color-scheme:dark){body.colorscheme-auto .content .post .tags .tag{background-color:#424242}body.colorscheme-auto .content .post .tags .tag a{color:#dadada}body.colorscheme-auto .content .post .tags .tag a:active{color:#dadada}body.colorscheme-auto .content .list ul li .title{color:#dadada}body.colorscheme-auto .content .list ul li .title:hover,body.colorscheme-auto .content .list ul li .title:focus{color:#42a5f5}body.colorscheme-auto .content .centered .about ul li a{color:#dadada}body.colorscheme-auto .content .centered .about ul li a:hover,body.colorscheme-auto .content .centered .about ul li a:focus{color:#42a5f5}}body.colorscheme-dark .notice .notice-title{border-bottom:1px solid #212121}@media(prefers-color-scheme:dark){body.colorscheme-auto .notice .notice-title{border-bottom:1px solid #212121}}body.colorscheme-dark .navigation a,body.colorscheme-dark .navigation span{color:#dadada}body.colorscheme-dark .navigation a:hover,body.colorscheme-dark .navigation a:focus{color:#42a5f5}@media only screen and (max-width:768px){body.colorscheme-dark .navigation .navigation-list{background-color:#212121;border-top:solid 2px #424242;border-bottom:solid 2px #424242}}@media only screen and (max-width:768px){body.colorscheme-dark .navigation .navigation-list .menu-separator{border-top:2px solid #dadada}}@media only screen and (max-width:768px){body.colorscheme-dark .navigation #menu-toggle:checked+label>i{color:#424242}}body.colorscheme-dark .navigation i{color:#dadada}body.colorscheme-dark .navigation i:hover,body.colorscheme-dark .navigation 
i:focus{color:#42a5f5}body.colorscheme-dark .navigation .menu-button i:hover,body.colorscheme-dark .navigation .menu-button i:focus{color:#dadada}@media(prefers-color-scheme:dark){body.colorscheme-auto .navigation a,body.colorscheme-auto .navigation span{color:#dadada}body.colorscheme-auto .navigation a:hover,body.colorscheme-auto .navigation a:focus{color:#42a5f5}}@media only screen and (prefers-color-scheme:dark) and (max-width:768px){body.colorscheme-auto .navigation .navigation-list{background-color:#212121;border-top:solid 2px #424242;border-bottom:solid 2px #424242}}@media only screen and (prefers-color-scheme:dark) and (max-width:768px){body.colorscheme-auto .navigation .navigation-list .menu-separator{border-top:2px solid #dadada}}@media only screen and (prefers-color-scheme:dark) and (max-width:768px){body.colorscheme-auto .navigation #menu-toggle:checked+label>i{color:#424242}}@media(prefers-color-scheme:dark){body.colorscheme-auto .navigation i{color:#dadada}body.colorscheme-auto .navigation i:hover,body.colorscheme-auto .navigation i:focus{color:#42a5f5}body.colorscheme-auto .navigation .menu-button i:hover,body.colorscheme-auto .navigation .menu-button i:focus{color:#dadada}}body.colorscheme-dark .tabs label.tab-label{background-color:#424242;border-color:#4f4f4f}body.colorscheme-dark .tabs input.tab-input:checked+label.tab-label{background-color:#212121}body.colorscheme-dark .tabs .tab-content{background-color:#212121;border-color:#4f4f4f}@media(prefers-color-scheme:dark){body.colorscheme-auto .tabs label.tab-label{background-color:#424242;border-color:#4f4f4f}body.colorscheme-auto .tabs input.tab-input:checked+label.tab-label{background-color:#212121}body.colorscheme-auto .tabs .tab-content{background-color:#212121;border-color:#4f4f4f}}body.colorscheme-dark .taxonomy-element{background-color:#424242}body.colorscheme-dark .taxonomy-element a{color:#dadada}body.colorscheme-dark .taxonomy-element a:active{color:#dadada}@media(prefers-color-scheme:dark){body.colorscheme-auto .taxonomy-element{background-color:#424242}body.colorscheme-auto .taxonomy-element a{color:#dadada}body.colorscheme-auto .taxonomy-element a:active{color:#dadada}}body.colorscheme-dark .footer a{color:#42a5f5}@media(prefers-color-scheme:dark){body.colorscheme-auto .footer a{color:#42a5f5}}body.colorscheme-dark .float-container a{color:#dadada;background-color:#424242}body.colorscheme-dark .float-container a:hover,body.colorscheme-dark .float-container a:focus{color:#42a5f5}@media only screen and (max-width:768px){body.colorscheme-dark .float-container a:hover,body.colorscheme-dark .float-container a:focus{color:#dadada}}@media(prefers-color-scheme:dark){body.colorscheme-auto .float-container a{color:#dadada;background-color:#424242}body.colorscheme-auto .float-container a:hover,body.colorscheme-auto .float-container a:focus{color:#42a5f5}}@media only screen and (prefers-color-scheme:dark) and (max-width:768px){body.colorscheme-auto .float-container a:hover,body.colorscheme-auto .float-container a:focus{color:#dadada}}body.colorscheme-dark{}body.colorscheme-dark .bg{color:#c9d1d9;background-color:#0d1117}body.colorscheme-dark .chroma{color:#c9d1d9;background-color:#0d1117}body.colorscheme-dark .chroma .err{color:#f85149}body.colorscheme-dark .chroma .lnlinks{outline:none;text-decoration:none;color:inherit}body.colorscheme-dark .chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}body.colorscheme-dark .chroma .lntable{border-spacing:0;padding:0;margin:0;border:0}body.colorscheme-dark .chroma 
.hl{background-color:#ffc}body.colorscheme-dark .chroma .lnt{white-space:pre;user-select:none;margin-right:.4em;padding:0 .4em;color:#64686c}body.colorscheme-dark .chroma .ln{white-space:pre;user-select:none;margin-right:.4em;padding:0 .4em;color:#6e7681}body.colorscheme-dark .chroma .line{display:flex}body.colorscheme-dark .chroma .k{color:#ff7b72}body.colorscheme-dark .chroma .kc{color:#79c0ff}body.colorscheme-dark .chroma .kd{color:#ff7b72}body.colorscheme-dark .chroma .kn{color:#ff7b72}body.colorscheme-dark .chroma .kp{color:#79c0ff}body.colorscheme-dark .chroma .kr{color:#ff7b72}body.colorscheme-dark .chroma .kt{color:#ff7b72}body.colorscheme-dark .chroma .nc{color:#f0883e;font-weight:700}body.colorscheme-dark .chroma .no{color:#79c0ff;font-weight:700}body.colorscheme-dark .chroma .nd{color:#d2a8ff;font-weight:700}body.colorscheme-dark .chroma .ni{color:#ffa657}body.colorscheme-dark .chroma .ne{color:#f0883e;font-weight:700}body.colorscheme-dark .chroma .nf{color:#d2a8ff;font-weight:700}body.colorscheme-dark .chroma .nl{color:#79c0ff;font-weight:700}body.colorscheme-dark .chroma .nn{color:#ff7b72}body.colorscheme-dark .chroma .py{color:#79c0ff}body.colorscheme-dark .chroma .nt{color:#7ee787}body.colorscheme-dark .chroma .nv{color:#79c0ff}body.colorscheme-dark .chroma .l{color:#a5d6ff}body.colorscheme-dark .chroma .ld{color:#79c0ff}body.colorscheme-dark .chroma .s{color:#a5d6ff}body.colorscheme-dark .chroma .sa{color:#79c0ff}body.colorscheme-dark .chroma .sb{color:#a5d6ff}body.colorscheme-dark .chroma .sc{color:#a5d6ff}body.colorscheme-dark .chroma .dl{color:#79c0ff}body.colorscheme-dark .chroma .sd{color:#a5d6ff}body.colorscheme-dark .chroma .s2{color:#a5d6ff}body.colorscheme-dark .chroma .se{color:#79c0ff}body.colorscheme-dark .chroma .sh{color:#79c0ff}body.colorscheme-dark .chroma .si{color:#a5d6ff}body.colorscheme-dark .chroma .sx{color:#a5d6ff}body.colorscheme-dark .chroma .sr{color:#79c0ff}body.colorscheme-dark .chroma .s1{color:#a5d6ff}body.colorscheme-dark .chroma .ss{color:#a5d6ff}body.colorscheme-dark .chroma .m{color:#a5d6ff}body.colorscheme-dark .chroma .mb{color:#a5d6ff}body.colorscheme-dark .chroma .mf{color:#a5d6ff}body.colorscheme-dark .chroma .mh{color:#a5d6ff}body.colorscheme-dark .chroma .mi{color:#a5d6ff}body.colorscheme-dark .chroma .il{color:#a5d6ff}body.colorscheme-dark .chroma .mo{color:#a5d6ff}body.colorscheme-dark .chroma .o{color:#ff7b72;font-weight:700}body.colorscheme-dark .chroma .ow{color:#ff7b72;font-weight:700}body.colorscheme-dark .chroma .c{color:#8b949e;font-style:italic}body.colorscheme-dark .chroma .ch{color:#8b949e;font-style:italic}body.colorscheme-dark .chroma .cm{color:#8b949e;font-style:italic}body.colorscheme-dark .chroma .c1{color:#8b949e;font-style:italic}body.colorscheme-dark .chroma .cs{color:#8b949e;font-weight:700;font-style:italic}body.colorscheme-dark .chroma .cp{color:#8b949e;font-weight:700;font-style:italic}body.colorscheme-dark .chroma .cpf{color:#8b949e;font-weight:700;font-style:italic}body.colorscheme-dark .chroma .gd{color:#ffa198;background-color:#490202}body.colorscheme-dark .chroma .ge{font-style:italic}body.colorscheme-dark .chroma .gr{color:#ffa198}body.colorscheme-dark .chroma .gh{color:#79c0ff;font-weight:700}body.colorscheme-dark .chroma .gi{color:#56d364;background-color:#0f5323}body.colorscheme-dark .chroma .go{color:#8b949e}body.colorscheme-dark .chroma .gp{color:#8b949e}body.colorscheme-dark .chroma .gs{font-weight:700}body.colorscheme-dark .chroma .gu{color:#79c0ff}body.colorscheme-dark .chroma 
.gt{color:#ff7b72}body.colorscheme-dark .chroma .gl{text-decoration:underline}body.colorscheme-dark .chroma .w{color:#6e7681}@media(prefers-color-scheme:dark){body.colorscheme-auto{}body.colorscheme-auto .bg{color:#c9d1d9;background-color:#0d1117}body.colorscheme-auto .chroma{color:#c9d1d9;background-color:#0d1117}body.colorscheme-auto .chroma .err{color:#f85149}body.colorscheme-auto .chroma .lnlinks{outline:none;text-decoration:none;color:inherit}body.colorscheme-auto .chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}body.colorscheme-auto .chroma .lntable{border-spacing:0;padding:0;margin:0;border:0}body.colorscheme-auto .chroma .hl{background-color:#ffc}body.colorscheme-auto .chroma .lnt{white-space:pre;user-select:none;margin-right:.4em;padding:0 .4em;color:#64686c}body.colorscheme-auto .chroma .ln{white-space:pre;user-select:none;margin-right:.4em;padding:0 .4em;color:#6e7681}body.colorscheme-auto .chroma .line{display:flex}body.colorscheme-auto .chroma .k{color:#ff7b72}body.colorscheme-auto .chroma .kc{color:#79c0ff}body.colorscheme-auto .chroma .kd{color:#ff7b72}body.colorscheme-auto .chroma .kn{color:#ff7b72}body.colorscheme-auto .chroma .kp{color:#79c0ff}body.colorscheme-auto .chroma .kr{color:#ff7b72}body.colorscheme-auto .chroma .kt{color:#ff7b72}body.colorscheme-auto .chroma .nc{color:#f0883e;font-weight:700}body.colorscheme-auto .chroma .no{color:#79c0ff;font-weight:700}body.colorscheme-auto .chroma .nd{color:#d2a8ff;font-weight:700}body.colorscheme-auto .chroma .ni{color:#ffa657}body.colorscheme-auto .chroma .ne{color:#f0883e;font-weight:700}body.colorscheme-auto .chroma .nf{color:#d2a8ff;font-weight:700}body.colorscheme-auto .chroma .nl{color:#79c0ff;font-weight:700}body.colorscheme-auto .chroma .nn{color:#ff7b72}body.colorscheme-auto .chroma .py{color:#79c0ff}body.colorscheme-auto .chroma .nt{color:#7ee787}body.colorscheme-auto .chroma .nv{color:#79c0ff}body.colorscheme-auto .chroma .l{color:#a5d6ff}body.colorscheme-auto .chroma .ld{color:#79c0ff}body.colorscheme-auto .chroma .s{color:#a5d6ff}body.colorscheme-auto .chroma .sa{color:#79c0ff}body.colorscheme-auto .chroma .sb{color:#a5d6ff}body.colorscheme-auto .chroma .sc{color:#a5d6ff}body.colorscheme-auto .chroma .dl{color:#79c0ff}body.colorscheme-auto .chroma .sd{color:#a5d6ff}body.colorscheme-auto .chroma .s2{color:#a5d6ff}body.colorscheme-auto .chroma .se{color:#79c0ff}body.colorscheme-auto .chroma .sh{color:#79c0ff}body.colorscheme-auto .chroma .si{color:#a5d6ff}body.colorscheme-auto .chroma .sx{color:#a5d6ff}body.colorscheme-auto .chroma .sr{color:#79c0ff}body.colorscheme-auto .chroma .s1{color:#a5d6ff}body.colorscheme-auto .chroma .ss{color:#a5d6ff}body.colorscheme-auto .chroma .m{color:#a5d6ff}body.colorscheme-auto .chroma .mb{color:#a5d6ff}body.colorscheme-auto .chroma .mf{color:#a5d6ff}body.colorscheme-auto .chroma .mh{color:#a5d6ff}body.colorscheme-auto .chroma .mi{color:#a5d6ff}body.colorscheme-auto .chroma .il{color:#a5d6ff}body.colorscheme-auto .chroma .mo{color:#a5d6ff}body.colorscheme-auto .chroma .o{color:#ff7b72;font-weight:700}body.colorscheme-auto .chroma .ow{color:#ff7b72;font-weight:700}body.colorscheme-auto .chroma .c{color:#8b949e;font-style:italic}body.colorscheme-auto .chroma .ch{color:#8b949e;font-style:italic}body.colorscheme-auto .chroma .cm{color:#8b949e;font-style:italic}body.colorscheme-auto .chroma .c1{color:#8b949e;font-style:italic}body.colorscheme-auto .chroma .cs{color:#8b949e;font-weight:700;font-style:italic}body.colorscheme-auto .chroma 
.cp{color:#8b949e;font-weight:700;font-style:italic}body.colorscheme-auto .chroma .cpf{color:#8b949e;font-weight:700;font-style:italic}body.colorscheme-auto .chroma .gd{color:#ffa198;background-color:#490202}body.colorscheme-auto .chroma .ge{font-style:italic}body.colorscheme-auto .chroma .gr{color:#ffa198}body.colorscheme-auto .chroma .gh{color:#79c0ff;font-weight:700}body.colorscheme-auto .chroma .gi{color:#56d364;background-color:#0f5323}body.colorscheme-auto .chroma .go{color:#8b949e}body.colorscheme-auto .chroma .gp{color:#8b949e}body.colorscheme-auto .chroma .gs{font-weight:700}body.colorscheme-auto .chroma .gu{color:#79c0ff}body.colorscheme-auto .chroma .gt{color:#ff7b72}body.colorscheme-auto .chroma .gl{text-decoration:underline}body.colorscheme-auto .chroma .w{color:#6e7681}} \ No newline at end of file diff --git a/css/coder.min.610348c09e525384da3565602b203c3a844dd0ad5a877e04601e9f20cae5466e.css b/css/coder.min.610348c09e525384da3565602b203c3a844dd0ad5a877e04601e9f20cae5466e.css new file mode 100644 index 00000000..cebce4ab --- /dev/null +++ b/css/coder.min.610348c09e525384da3565602b203c3a844dd0ad5a877e04601e9f20cae5466e.css @@ -0,0 +1,6 @@ +@charset "UTF-8";/*!normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css*/html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent;word-wrap:break-word}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button}button::-moz-focus-inner,[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner{border-style:none;padding:0}button:-moz-focusring,[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none}/*!Fork Awesome 1.2.0 +License - https://forkaweso.me/Fork-Awesome/license +Copyright 2018 Dave Gandy & Fork Awesome +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the 
following conditions: +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.*/@font-face{font-family:forkawesome;src:url(../fonts/forkawesome-webfont.eot?v=1.2.0);src:url(../fonts/forkawesome-webfont.eot?#iefix&v=1.2.0)format("embedded-opentype"),url(../fonts/forkawesome-webfont.woff2?v=1.2.0)format("woff2"),url(../fonts/forkawesome-webfont.woff?v=1.2.0)format("woff"),url(../fonts/forkawesome-webfont.ttf?v=1.2.0)format("truetype"),url(../fonts/forkawesome-webfont.svg?v=1.2.0#forkawesomeregular)format("svg");font-weight:400;font-style:normal;font-display:block}.fa{display:inline-block;font:14px/1 ForkAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1,1);-ms-transform:scale(-1,1);transform:scale(-1,1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1,-1);-ms-transform:scale(1,-1);transform:scale(1,-1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-sync:before,.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video:before,.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle
:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell-o:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-globe-e:before{content:"\f304"}.fa-globe-w:before{content:"\f305"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-community:before,.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{con
tent:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus-g:before,.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-utensils:before,.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"
}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-pound:before,.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-down:before,.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-up:before,.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-down:before,.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-up:before,.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-down:before,.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-up:before,.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"
\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-utensil-spoon:before,.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa
-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-heading:before,.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-closed-captioning:before,.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-gem:before,.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-intersex:before,.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-genderless:before{content:"\f22d"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{conte
nt:"\f23a"}.fa-medium-square:before{content:"\f2f8"}.fa-yc:before,.fa-y-combinator:before{content:"\f23b"}.fa-optin-monster:before{content:"\f23c"}.fa-opencart:before{content:"\f23d"}.fa-expeditedssl:before{content:"\f23e"}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:"\f240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-battery-2:before,.fa-battery-half:before{content:"\f242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\f243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-mouse-pointer:before{content:"\f245"}.fa-i-cursor:before{content:"\f246"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-sticky-note:before{content:"\f249"}.fa-sticky-note-o:before{content:"\f24a"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-diners-club:before{content:"\f24c"}.fa-clone:before{content:"\f24d"}.fa-balance-scale:before{content:"\f24e"}.fa-hourglass-o:before{content:"\f250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-hourglass:before{content:"\f254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\f255"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:"\f256"}.fa-hand-scissors-o:before{content:"\f257"}.fa-hand-lizard-o:before{content:"\f258"}.fa-hand-spock-o:before{content:"\f259"}.fa-hand-pointer-o:before{content:"\f25a"}.fa-hand-peace-o:before{content:"\f25b"}.fa-trademark:before{content:"\f25c"}.fa-registered:before{content:"\f25d"}.fa-creative-commons:before{content:"\f25e"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-tripadvisor:before{content:"\f262"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-get-pocket:before{content:"\f265"}.fa-wikipedia-w:before{content:"\f266"}.fa-safari:before{content:"\f267"}.fa-chrome:before{content:"\f268"}.fa-firefox:before{content:"\f269"}.fa-opera:before{content:"\f26a"}.fa-internet-explorer:before{content:"\f26b"}.fa-tv:before,.fa-television:before{content:"\f26c"}.fa-contao:before{content:"\f26d"}.fa-500px:before{content:"\f26e"}.fa-amazon:before{content:"\f270"}.fa-calendar-plus-o:before{content:"\f271"}.fa-calendar-minus-o:before{content:"\f272"}.fa-calendar-times-o:before{content:"\f273"}.fa-calendar-check-o:before{content:"\f274"}.fa-industry:before{content:"\f275"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-map-o:before{content:"\f278"}.fa-map:before{content:"\f279"}.fa-commenting:before{content:"\f27a"}.fa-commenting-o:before{content:"\f27b"}.fa-houzz:before{content:"\f27c"}.fa-vimeo-v:before,.fa-vimeo:before{content:"\f27d"}.fa-black-tie:before{content:"\f27e"}.fa-fonticons:before{content:"\f280"}.fa-reddit-alien:before{content:"\f281"}.fa-edge:before{content:"\f282"}.fa-credit-card-alt:before{content:"\f283"}.fa-codiepie:before{content:"\f284"}.fa-modx:before{content:"\f285"}.fa-fort-awesome:before{content:"\f286"}.fa-usb:before{content:"\f287"}.fa-product-hunt:before{content:"\f288"}.fa-mixcloud:before{content:"\f289"}.fa-scribd:before{content:"\f28a"}.fa-pause-circle:before{content:"\f28b"}.fa-pause-circle-o:before{content:"\f28c"}.fa-stop-circle:before{content:"\f28d"}.fa-stop-circle-o:before{content:"\f28e"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-hashtag:before{content:"\f292"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth
-b:before{content:"\f294"}.fa-percent:before{content:"\f295"}.fa-gitlab:before{content:"\f296"}.fa-wpbeginner:before{content:"\f297"}.fa-wpforms:before{content:"\f298"}.fa-envira:before{content:"\f299"}.fa-universal-access:before{content:"\f29a"}.fa-wheelchair-alt:before{content:"\f29b"}.fa-question-circle-o:before{content:"\f29c"}.fa-blind:before{content:"\f29d"}.fa-audio-description:before{content:"\f29e"}.fa-phone-volume:before,.fa-volume-control-phone:before{content:"\f2a0"}.fa-braille:before{content:"\f2a1"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:"\f2a4"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-signing:before,.fa-sign-language:before{content:"\f2a7"}.fa-low-vision:before{content:"\f2a8"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-first-order:before{content:"\f2b0"}.fa-yoast:before{content:"\f2b1"}.fa-themeisle:before{content:"\f2b2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\f2b3"}.fa-fa:before,.fa-font-awesome:before{content:"\f2b4"}.fa-handshake-o:before{content:"\f2b5"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-o:before{content:"\f2b7"}.fa-linode:before{content:"\f2b8"}.fa-address-book:before{content:"\f2b9"}.fa-address-book-o:before{content:"\f2ba"}.fa-vcard:before,.fa-address-card:before{content:"\f2bb"}.fa-vcard-o:before,.fa-address-card-o:before{content:"\f2bc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-circle-o:before{content:"\f2be"}.fa-user-o:before{content:"\f2c0"}.fa-id-badge:before{content:"\f2c1"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-drivers-license-o:before,.fa-id-card-o:before{content:"\f2c3"}.fa-quora:before{content:"\f2c4"}.fa-free-code-camp:before{content:"\f2c5"}.fa-telegram:before{content:"\f2c6"}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-shower:before{content:"\f2cc"}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:"\f2cd"}.fa-podcast:before{content:"\f2ce"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\f2d3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\f2d4"}.fa-bandcamp:before{content:"\f2d5"}.fa-grav:before{content:"\f2d6"}.fa-etsy:before{content:"\f2d7"}.fa-imdb:before{content:"\f2d8"}.fa-ravelry:before{content:"\f2d9"}.fa-eercast:before{content:"\f2da"}.fa-microchip:before{content:"\f2db"}.fa-snowflake-o:before{content:"\f2dc"}.fa-superpowers:before{content:"\f2dd"}.fa-wpexplorer:before{content:"\f2de"}.fa-meetup:before{content:"\f2e0"}.fa-mastodon:before{content:"\f2e1"}.fa-mastodon-alt:before{content:"\f2e2"}.fa-fork-circle:before,.fa-fork-awesome:before{content:"\f2e3"}.fa-peertube:before{content:"\f2e4"}.fa-diaspora:before{content:"\f2e5"}.fa-friendica:before{content:"\f2e6"}.fa-gnu-social:before{content:"\f2e7"}.fa-l
iberapay-square:before{content:"\f2e8"}.fa-liberapay:before{content:"\f2e9"}.fa-ssb:before,.fa-scuttlebutt:before{content:"\f2ea"}.fa-hubzilla:before{content:"\f2eb"}.fa-social-home:before{content:"\f2ec"}.fa-artstation:before{content:"\f2ed"}.fa-discord:before{content:"\f2ee"}.fa-discord-alt:before{content:"\f2ef"}.fa-patreon:before{content:"\f2f0"}.fa-snowdrift:before{content:"\f2f1"}.fa-activitypub:before{content:"\f2f2"}.fa-ethereum:before{content:"\f2f3"}.fa-keybase:before{content:"\f2f4"}.fa-shaarli:before{content:"\f2f5"}.fa-shaarli-o:before{content:"\f2f6"}.fa-cut-key:before,.fa-key-modern:before{content:"\f2f7"}.fa-xmpp:before{content:"\f2f9"}.fa-archive-org:before{content:"\f2fc"}.fa-freedombox:before{content:"\f2fd"}.fa-facebook-messenger:before{content:"\f2fe"}.fa-debian:before{content:"\f2ff"}.fa-mastodon-square:before{content:"\f300"}.fa-tipeee:before{content:"\f301"}.fa-react:before{content:"\f302"}.fa-dogmazic:before{content:"\f303"}.fa-zotero:before{content:"\f309"}.fa-nodejs:before{content:"\f308"}.fa-nextcloud:before{content:"\f306"}.fa-nextcloud-square:before{content:"\f307"}.fa-hackaday:before{content:"\f30a"}.fa-laravel:before{content:"\f30b"}.fa-signalapp:before{content:"\f30c"}.fa-gnupg:before{content:"\f30d"}.fa-php:before{content:"\f30e"}.fa-ffmpeg:before{content:"\f30f"}.fa-joplin:before{content:"\f310"}.fa-syncthing:before{content:"\f311"}.fa-inkscape:before{content:"\f312"}.fa-matrix-org:before{content:"\f313"}.fa-pixelfed:before{content:"\f314"}.fa-bootstrap:before{content:"\f315"}.fa-dev-to:before{content:"\f316"}.fa-hashnode:before{content:"\f317"}.fa-jirafeau:before{content:"\f318"}.fa-emby:before{content:"\f319"}.fa-wikidata:before{content:"\f31a"}.fa-gimp:before{content:"\f31b"}.fa-c:before{content:"\f31c"}.fa-digitalocean:before{content:"\f31d"}.fa-att:before{content:"\f31e"}.fa-gitea:before{content:"\f31f"}.fa-file-epub:before{content:"\f321"}.fa-python:before{content:"\f322"}.fa-archlinux:before{content:"\f323"}.fa-pleroma:before{content:"\f324"}.fa-unsplash:before{content:"\f325"}.fa-hackster:before{content:"\f326"}.fa-spell-check:before{content:"\f327"}.fa-moon:before{content:"\f328"}.fa-sun:before{content:"\f329"}.fa-f-droid:before{content:"\f32a"}.fa-biometric:before{content:"\f32b"}.fa-wire:before{content:"\f32c"}.fa-tor-onion:before{content:"\f32e"}.fa-volume-mute:before{content:"\f32f"}.fa-bell-ringing:before{content:"\f32d"}.fa-bell-ringing-o:before{content:"\f330"}.fa-hal:before{content:"\f333"}.fa-jupyter:before{content:"\f335"}.fa-julia:before{content:"\f334"}.fa-classicpress:before{content:"\f331"}.fa-classicpress-circle:before{content:"\f332"}.fa-open-collective:before{content:"\f336"}.fa-orcid:before{content:"\f337"}.fa-researchgate:before{content:"\f338"}.fa-funkwhale:before{content:"\f339"}.fa-askfm:before{content:"\f33a"}.fa-blockstack:before{content:"\f33b"}.fa-boardgamegeek:before{content:"\f33c"}.fa-bunny:before{content:"\f35f"}.fa-buymeacoffee:before{content:"\f33d"}.fa-cc-by:before{content:"\f33e"}.fa-creative-commons-alt:before,.fa-cc-cc:before{content:"\f33f"}.fa-cc-nc-eu:before{content:"\f341"}.fa-cc-nc-jp:before{content:"\f342"}.fa-cc-nc:before{content:"\f340"}.fa-cc-nd:before{content:"\f343"}.fa-cc-pd:before{content:"\f344"}.fa-cc-remix:before{content:"\f345"}.fa-cc-sa:before{content:"\f346"}.fa-cc-share:before{content:"\f347"}.fa-cc-zero:before{content:"\f348"}.fa-conway-hacker:before,.fa-conway-glider:before{content:"\f349"}.fa-csharp:before{content:"\f34a"}.fa-email-bulk:before{content:"\f34b"}.fa-email-bulk-o:before{conten
t:"\f34c"}.fa-gnu:before{content:"\f34d"}.fa-google-play:before{content:"\f34e"}.fa-heroku:before{content:"\f34f"}.fa-hassio:before,.fa-home-assistant:before{content:"\f350"}.fa-java:before{content:"\f351"}.fa-mariadb:before{content:"\f352"}.fa-markdown:before{content:"\f353"}.fa-mysql:before{content:"\f354"}.fa-nordcast:before{content:"\f355"}.fa-plume:before{content:"\f356"}.fa-postgresql:before{content:"\f357"}.fa-sass-alt:before{content:"\f359"}.fa-sass:before{content:"\f358"}.fa-skate:before{content:"\f35a"}.fa-sketchfab:before{content:"\f35b"}.fa-tex:before{content:"\f35c"}.fa-textpattern:before{content:"\f35d"}.fa-unity:before{content:"\f35e"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}@font-face{font-family:baskerville;src:url(/fonts/Libre_Baskerville/LibreBaskerville-Regular.ttf)format("truetype");font-weight:"regular";font-style:"regular"}@font-face{font-family:baskerville-bold;src:url(/fonts/Libre_Baskerville/LibreBaskerville-Bold.ttf)format("truetype");font-weight:"bold";font-style:"bold"}@font-face{font-family:baskerville-italic;src:url(/fonts/Libre_Baskerville/LibreBaskerville-Italic.ttf)format("truetype");font-weight:"regular";font-style:"regular"}*,*:after,*:before{box-sizing:inherit}html{box-sizing:border-box;font-size:62.5%}body{color:#212121;background-color:#fff;font-family:-apple-system,baskerville,BlinkMacSystemFont,segoe ui,Cantarell,Helvetica,游ゴシック,pingfang sc,STXihei,华文细黑,microsoft yahei,微软雅黑,SimSun,宋体,Heiti,黑体,sans-serif;font-size:1.8em;font-weight:400;line-height:1.8em}@media only screen and (max-width:768px){body{font-size:1.6em;line-height:1.6em}}iframe[src*=disqus]{color-scheme:light}a{font-weight:500;color:#1565c0;text-decoration:none;transition:all .25s ease-in}a:focus,a:hover{text-decoration:underline}p{margin:2rem 0}h1,h2,h3,h4,h5,h6{font-family:-apple-system,baskerville,BlinkMacSystemFont,segoe ui,Cantarell,Helvetica,游ゴシック,pingfang sc,STXihei,华文细黑,microsoft yahei,微软雅黑,SimSun,宋体,Heiti,黑体,sans-serif;font-weight:600;color:#000;margin:4rem 0 2.5rem}h1:hover .heading-link,h2:hover .heading-link,h3:hover .heading-link,h4:hover .heading-link,h5:hover .heading-link,h6:hover .heading-link{visibility:visible}h1 .heading-link,h2 .heading-link,h3 .heading-link,h4 .heading-link,h5 .heading-link,h6 .heading-link{color:#1565c0;font-weight:inherit;text-decoration:none;font-size:80%;visibility:hidden}h1 .title-link,h2 .title-link,h3 .title-link,h4 .title-link,h5 .title-link,h6 .title-link{color:inherit;font-weight:inherit;text-decoration:none}h1{font-size:3.2rem;line-height:3.6rem}@media only screen and (max-width:768px){h1{font-size:3rem;line-height:3.4rem}}h2{font-size:2.8rem;line-height:3.2rem}@media only screen and (max-width:768px){h2{font-size:2.6rem;line-height:3rem}}h3{font-size:2.4rem;line-height:2.8rem}@media only screen and (max-width:768px){h3{font-size:2.2rem;line-height:2.6rem}}h4{font-size:2.2rem;line-height:2.6rem}@media only screen and (max-width:768px){h4{font-size:2rem;line-height:2.4rem}}h5{font-size:2rem;line-height:2.4rem}@media only screen and (max-width:768px){h5{font-size:1.8rem;line-height:2.2rem}}h6{font-size:1.8rem;line-height:2.2rem}@media only screen and (max-width:768px){h6{font-size:1.6rem;line-height:2rem}}b,strong{font-weight:700}.highlight div,.highlight pre{margin:2rem 
0;padding:1rem;border-radius:1rem}pre{display:block;font-family:SFMono-Regular,Consolas,Liberation Mono,Menlo,monospace;font-size:1.6rem;font-weight:400;line-height:2.6rem;overflow-x:auto;margin:2rem 0;padding:1rem;border-radius:1rem}pre code{display:inline-block;background-color:inherit;color:inherit}code{font-family:SFMono-Regular,Consolas,Liberation Mono,Menlo,monospace;font-size:1.6rem;font-weight:400;border-radius:.6rem;padding:.3rem .6rem;background-color:#ccc;color:#212121}blockquote{border-left:2px solid #e0e0e0;padding-left:2rem;line-height:2.2rem;font-weight:400;font-style:italic}th,td{padding:1.6rem}table{border-collapse:collapse}table td,table th{border:2px solid #000}table tr:first-child th{border-top:0}table tr:last-child td{border-bottom:0}table tr td:first-child,table tr th:first-child{border-left:0}table tr td:last-child,table tr th:last-child{border-right:0}img{width:100%}figure{text-align:center}.footnotes ol li p{margin:0}.preload-transitions *{-webkit-transition:none!important;-moz-transition:none!important;-ms-transition:none!important;-o-transition:none!important;transition:none!important}.wrapper{display:flex;flex-direction:column;min-height:100vh;width:100%}.container{margin:1rem auto;max-width:90rem;width:100%;padding-left:2rem;padding-right:2rem}.fab{font-weight:400}.fas{font-weight:700}.float-right{float:right}.float-left{float:left}.fab{font-weight:400}.fas{font-weight:900}.content{flex:1;display:flex;margin-top:1.6rem;margin-bottom:3.2rem}.content header{margin-top:6.4rem;margin-bottom:3.2rem}.content header h1{font-size:4.2rem;line-height:4.6rem;margin:0}@media only screen and (max-width:768px){.content header h1{font-size:4rem;line-height:4.4rem}}.content article{}.content article details summary{cursor:pointer}.content article footer{margin-top:4rem}.content article footer .see-also{margin:3.2rem 0}.content article footer .see-also h3{margin:3.2rem 0}.content article p{text-align:justify;text-justify:auto;hyphens:auto}.content .post .post-title{margin-bottom:.75em}.content .post .post-meta i{text-align:center;width:1.6rem;margin-left:0;margin-right:.5rem}.content .post .post-meta .date .posted-on{margin-left:0;margin-right:1.5rem}.content .post .post-meta .tags .tag{display:inline-block;padding:.3rem .6rem;background-color:#e0e0e0;border-radius:.6rem;line-height:1.4em}.content .post .post-meta .tags .tag a{color:#212121}.content .post .post-meta .tags .tag a:active{color:#212121}.content figure{margin:0;padding:0}.content figcaption p{text-align:center;font-style:italic;font-size:1.6rem;margin:0}.avatar img{width:20rem;height:auto;border-radius:50%}@media only screen and (max-width:768px){.avatar img{width:10rem}}.list ul{margin:3.2rem 0;list-style:none;padding:0}.list ul li{font-size:1.8rem}@media only screen and (max-width:768px){.list ul li{margin:1.6rem 0}}.list ul li .date{display:inline-block;flex:1;width:20rem;text-align:right;margin-right:3rem}@media only screen and (max-width:768px){.list ul li .date{display:block;text-align:left}}.list ul li .title{font-size:1.8rem;flex:2;color:#212121;font-family:-apple-system,baskerville,BlinkMacSystemFont,segoe ui,Cantarell,Helvetica,游ゴシック,pingfang sc,STXihei,华文细黑,microsoft yahei,微软雅黑,SimSun,宋体,Heiti,黑体,sans-serif;font-weight:700}.list ul li .title:hover,.list ul li .title:focus{color:#1565c0}@media only screen and (min-width:768.1px){.list ul:not(.pagination) li{display:flex}}.centered{display:flex;align-items:center;justify-content:center}.centered .about{text-align:center}.centered .about 
h1{margin-top:2rem;margin-bottom:.5rem}.centered .about h2{margin-top:1rem;margin-bottom:.5rem;font-size:2.4rem}@media only screen and (max-width:768px){.centered .about h2{font-size:2rem}}.centered .about ul{list-style:none;margin:3rem 0 1rem;padding:0}.centered .about ul li{display:inline-block;position:relative}.centered .about ul li a{color:#212121;text-transform:uppercase;margin-left:1rem;margin-right:1rem;font-size:1.6rem}.centered .about ul li a:hover,.centered .about ul li a:focus{color:#1565c0}@media only screen and (max-width:768px){.centered .about ul li a{font-size:1.4rem}}.centered .error{text-align:center}.centered .error h1{margin-top:2rem;margin-bottom:.5rem;font-size:4.6rem}@media only screen and (max-width:768px){.centered .error h1{font-size:3.2rem}}.centered .error h2{margin-top:2rem;margin-bottom:3.2rem;font-size:3.2rem}@media only screen and (max-width:768px){.centered .error h2{font-size:2.8rem}}.notice{border-radius:.2rem;position:relative;margin:2rem 0;padding:0 .75rem;overflow:auto}.notice .notice-title{position:relative;font-weight:700;margin:0 -.75rem;padding:.2rem 3.5rem;border-bottom:1px solid #fff}.notice .notice-title i{position:absolute;top:50%;left:1.8rem;transform:translate(-50%,-50%)}.notice .notice-content{display:block;margin:2rem}.notice.note{background-color:#7e57c21a}.notice.note .notice-title{background-color:#673ab71a}.notice.note .notice-title i{color:#5e35b1}.notice.tip{background-color:#26a69a1a}.notice.tip .notice-title{background-color:#0096881a}.notice.tip .notice-title i{color:#00897b}.notice.example{background-color:#8d6e631a}.notice.example .notice-title{background-color:#7955481a}.notice.example .notice-title i{color:#6d4c41}.notice.question{background-color:#9ccc651a}.notice.question .notice-title{background-color:#8bc34a1a}.notice.question .notice-title i{color:#7cb342}.notice.info{background-color:#42a5f51a}.notice.info .notice-title{background-color:#2196f31a}.notice.info .notice-title i{color:#1e88e5}.notice.warning{background-color:#ffca281a}.notice.warning .notice-title{background-color:#ffc1071a}.notice.warning .notice-title i{color:#ffb300}.notice.error{background-color:#ef53501a}.notice.error .notice-title{background-color:#f443361a}.notice.error .notice-title i{color:#e53935}.navigation{height:6rem;width:100%}.navigation a,.navigation span{display:inline;font-size:1.7rem;font-family:-apple-system,baskerville,BlinkMacSystemFont,segoe ui,Cantarell,Helvetica,游ゴシック,pingfang sc,STXihei,华文细黑,microsoft yahei,微软雅黑,SimSun,宋体,Heiti,黑体,sans-serif;font-weight:600;color:#212121}.navigation a:hover,.navigation a:focus{color:#1565c0}.navigation .navigation-title{letter-spacing:.1rem;text-transform:uppercase}.navigation .navigation-list{float:right;list-style:none;margin-bottom:0;margin-top:0}@media only screen and (max-width:768px){.navigation .navigation-list{position:relative;top:2rem;right:0;z-index:5;visibility:hidden;opacity:0;padding:0;max-height:0;width:100%;background-color:#fff;border-top:solid 2px #e0e0e0;border-bottom:solid 2px #e0e0e0;transition:opacity .25s,max-height .15s linear}}.navigation .navigation-list .navigation-item{float:left;margin:0;position:relative}@media only screen and (max-width:768px){.navigation .navigation-list .navigation-item{float:none!important;text-align:center}.navigation .navigation-list .navigation-item a,.navigation .navigation-list .navigation-item span{line-height:5rem}}.navigation .navigation-list .navigation-item a,.navigation .navigation-list .navigation-item 
span{margin-left:1rem;margin-right:1rem}@media only screen and (max-width:768px){.navigation .navigation-list .separator{display:none}}@media only screen and (max-width:768px){.navigation .navigation-list .menu-separator{border-top:2px solid #212121;margin:0 8rem}.navigation .navigation-list .menu-separator span{display:none}}.navigation #dark-mode-toggle{margin:1.7rem 0;font-size:2.4rem;line-height:inherit;bottom:2rem;left:2rem;z-index:100;position:fixed}.navigation #menu-toggle{display:none}@media only screen and (max-width:768px){.navigation #menu-toggle{display:initial;position:relative;left:-99999px;opacity:0}.navigation #menu-toggle:checked+label>i{color:#e0e0e0}.navigation #menu-toggle:checked+label+ul{visibility:visible;opacity:1;max-height:100rem}.navigation #menu-toggle:focus-visible+label{outline-style:auto}}.navigation .menu-button{display:none}@media only screen and (max-width:768px){.navigation .menu-button{position:relative;display:block;font-size:2.4rem;font-weight:400}}.navigation .menu-button i:hover,.navigation .menu-button i:focus{color:#000}.navigation i{color:#212121;cursor:pointer}.navigation i:hover,.navigation i:focus{color:#1565c0}.pagination{margin-top:6rem;text-align:center;font-family:-apple-system,baskerville,BlinkMacSystemFont,segoe ui,Cantarell,Helvetica,游ゴシック,pingfang sc,STXihei,华文细黑,microsoft yahei,微软雅黑,SimSun,宋体,Heiti,黑体,sans-serif}.pagination li{display:inline;text-align:center;font-weight:700}.pagination li span{margin:0;text-align:center;width:3.2rem}.pagination li a{font-weight:300}.pagination li a span{margin:0;text-align:center;width:3.2rem}.tabs{display:flex;flex-wrap:wrap;margin:2rem 0;position:relative}.tabs.tabs-left{justify-content:flex-start}.tabs.tabs-left label.tab-label{margin-right:.5rem}.tabs.tabs-left .tab-content{border-radius:0 4px 4px 4px}.tabs.tabs-right{justify-content:flex-end}.tabs.tabs-right label.tab-label{margin-left:.5rem}.tabs.tabs-right .tab-content{border-radius:4px 0 4px 4px}.tabs input.tab-input{display:none}.tabs label.tab-label{background-color:#e0e0e0;border-color:#ccc;border-radius:4px 4px 0 0;border-style:solid;border-bottom-style:hidden;border-width:1px;cursor:pointer;display:inline-block;order:1;padding:.3rem .6rem;position:relative;top:1px;user-select:none}.tabs input.tab-input:checked+label.tab-label{background-color:#fff}.tabs .tab-content{background-color:#fff;border-color:#ccc;border-style:solid;border-width:1px;display:none;order:2;padding:1rem;width:100%}.tabs.tabs-code .tab-content{padding:.5rem}.tabs.tabs-code .tab-content pre{margin:0}.taxonomy li{display:inline-block;margin:.9rem}.taxonomy .taxonomy-element{display:block;padding:.3rem .9rem;background-color:#e0e0e0;border-radius:.6rem}.taxonomy .taxonomy-element a{color:#212121}.taxonomy .taxonomy-element a:active{color:#212121}.footer{width:100%;text-align:center;font-size:1.6rem;line-height:2rem;margin-bottom:1rem}.footer a{color:#1565c0}.float-container{bottom:2rem;right:2rem;z-index:100;position:fixed;font-size:1.6em}.float-container a{position:relative;display:inline-block;width:3rem;height:3rem;font-size:2rem;color:#000;background-color:#e0e0e0;border-radius:.2rem;opacity:.5;transition:all .25s ease-in}.float-container a:hover,.float-container a:focus{color:#1565c0;opacity:1}@media only screen and (max-width:768px){.float-container a:hover,.float-container a:focus{color:#000;opacity:.5}}.float-container a i{position:absolute;top:50%;left:50%;transform:translate(-50%,-50%)}.bg{background-color:#fff}.chroma{background-color:#fff}.chroma 
.err{color:#a61717;background-color:#e3d2d2}.chroma .lnlinks{outline:none;text-decoration:none;color:inherit}.chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}.chroma .lntable{border-spacing:0;padding:0;margin:0;border:0}.chroma .hl{background-color:#ffc}.chroma .lnt{white-space:pre;user-select:none;margin-right:.4em;padding:0 .4em;color:#7f7f7f}.chroma .ln{white-space:pre;user-select:none;margin-right:.4em;padding:0 .4em;color:#7f7f7f}.chroma .line{display:flex}.chroma .k{color:#000;font-weight:700}.chroma .kc{color:#000;font-weight:700}.chroma .kd{color:#000;font-weight:700}.chroma .kn{color:#000;font-weight:700}.chroma .kp{color:#000;font-weight:700}.chroma .kr{color:#000;font-weight:700}.chroma .kt{color:#458;font-weight:700}.chroma .na{color:teal}.chroma .nb{color:#0086b3}.chroma .bp{color:#999}.chroma .nc{color:#458;font-weight:700}.chroma .no{color:teal}.chroma .nd{color:#3c5d5d;font-weight:700}.chroma .ni{color:purple}.chroma .ne{color:#900;font-weight:700}.chroma .nf{color:#900;font-weight:700}.chroma .nl{color:#900;font-weight:700}.chroma .nn{color:#555}.chroma .nt{color:navy}.chroma .nv{color:teal}.chroma .vc{color:teal}.chroma .vg{color:teal}.chroma .vi{color:teal}.chroma .s{color:#d14}.chroma .sa{color:#d14}.chroma .sb{color:#d14}.chroma .sc{color:#d14}.chroma .dl{color:#d14}.chroma .sd{color:#d14}.chroma .s2{color:#d14}.chroma .se{color:#d14}.chroma .sh{color:#d14}.chroma .si{color:#d14}.chroma .sx{color:#d14}.chroma .sr{color:#009926}.chroma .s1{color:#d14}.chroma .ss{color:#990073}.chroma .m{color:#099}.chroma .mb{color:#099}.chroma .mf{color:#099}.chroma .mh{color:#099}.chroma .mi{color:#099}.chroma .il{color:#099}.chroma .mo{color:#099}.chroma .o{color:#000;font-weight:700}.chroma .ow{color:#000;font-weight:700}.chroma .c{color:#998;font-style:italic}.chroma .ch{color:#998;font-style:italic}.chroma .cm{color:#998;font-style:italic}.chroma .c1{color:#998;font-style:italic}.chroma .cs{color:#999;font-weight:700;font-style:italic}.chroma .cp{color:#999;font-weight:700;font-style:italic}.chroma .cpf{color:#999;font-weight:700;font-style:italic}.chroma .gd{color:#000;background-color:#fdd}.chroma .ge{color:#000;font-style:italic}.chroma .gr{color:#a00}.chroma .gh{color:#999}.chroma .gi{color:#000;background-color:#dfd}.chroma .go{color:#888}.chroma .gp{color:#555}.chroma .gs{font-weight:700}.chroma .gu{color:#aaa}.chroma .gt{color:#a00}.chroma .gl{text-decoration:underline}.chroma .w{color:#bbb} \ No newline at end of file diff --git a/static/favicon.ico b/favicon.ico similarity index 100% rename from static/favicon.ico rename to favicon.ico diff --git a/static/favicon.png b/favicon.png similarity index 100% rename from static/favicon.png rename to favicon.png diff --git a/static/fonts/Libre_Baskerville/LibreBaskerville-Bold.ttf b/fonts/Libre_Baskerville/LibreBaskerville-Bold.ttf similarity index 100% rename from static/fonts/Libre_Baskerville/LibreBaskerville-Bold.ttf rename to fonts/Libre_Baskerville/LibreBaskerville-Bold.ttf diff --git a/static/fonts/Libre_Baskerville/LibreBaskerville-Italic.ttf b/fonts/Libre_Baskerville/LibreBaskerville-Italic.ttf similarity index 100% rename from static/fonts/Libre_Baskerville/LibreBaskerville-Italic.ttf rename to fonts/Libre_Baskerville/LibreBaskerville-Italic.ttf diff --git a/static/fonts/Libre_Baskerville/LibreBaskerville-Regular.ttf b/fonts/Libre_Baskerville/LibreBaskerville-Regular.ttf similarity index 100% rename from static/fonts/Libre_Baskerville/LibreBaskerville-Regular.ttf rename to 
fonts/Libre_Baskerville/LibreBaskerville-Regular.ttf diff --git a/static/fonts/Libre_Baskerville/OFL.txt b/fonts/Libre_Baskerville/OFL.txt similarity index 100% rename from static/fonts/Libre_Baskerville/OFL.txt rename to fonts/Libre_Baskerville/OFL.txt diff --git a/themes/hugo-coder/static/fonts/forkawesome-webfont.eot b/fonts/forkawesome-webfont.eot similarity index 100% rename from themes/hugo-coder/static/fonts/forkawesome-webfont.eot rename to fonts/forkawesome-webfont.eot diff --git a/themes/hugo-coder/static/fonts/forkawesome-webfont.svg b/fonts/forkawesome-webfont.svg similarity index 100% rename from themes/hugo-coder/static/fonts/forkawesome-webfont.svg rename to fonts/forkawesome-webfont.svg diff --git a/themes/hugo-coder/static/fonts/forkawesome-webfont.ttf b/fonts/forkawesome-webfont.ttf similarity index 100% rename from themes/hugo-coder/static/fonts/forkawesome-webfont.ttf rename to fonts/forkawesome-webfont.ttf diff --git a/themes/hugo-coder/static/fonts/forkawesome-webfont.woff b/fonts/forkawesome-webfont.woff similarity index 100% rename from themes/hugo-coder/static/fonts/forkawesome-webfont.woff rename to fonts/forkawesome-webfont.woff diff --git a/themes/hugo-coder/static/fonts/forkawesome-webfont.woff2 b/fonts/forkawesome-webfont.woff2 similarity index 100% rename from themes/hugo-coder/static/fonts/forkawesome-webfont.woff2 rename to fonts/forkawesome-webfont.woff2 diff --git a/index.html b/index.html new file mode 100644 index 00000000..ed096d19 --- /dev/null +++ b/index.html @@ -0,0 +1,303 @@ + + + + + Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+ + +
avatar
+ + + + + +

Sara Zan

+ +

Python and LLMs specialist, +#1 contributor of Haystack, +former CERN employee.

+

I’m also an opinionated sci-fi reader, hiker, tinkerer and somewhat polyglot. Currently busy trying to learn Portuguese and Hungarian at the same time.

+

+ + + + + + +
+ + + +
+ + +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + Sara Zan + https://www.zansara.dev/ + Recent content on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them to a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. 
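The feed entries above summarize posts about assembling RAG pipelines in Haystack 2.0 from a retriever, a prompt builder, and a generator backed by a document store. As a rough illustration only (this is not code from those posts; it assumes the Haystack 2.x Python API as it later stabilized rather than the preview imports current at the time, and the document, template, and question are placeholders), such a pipeline can be wired roughly like this:

    from haystack import Document, Pipeline
    from haystack.components.builders import PromptBuilder
    from haystack.components.generators import OpenAIGenerator
    from haystack.components.retrievers.in_memory import InMemoryBM25Retriever
    from haystack.document_stores.in_memory import InMemoryDocumentStore

    # Placeholder document store holding a single toy document.
    document_store = InMemoryDocumentStore()
    document_store.write_documents([Document(content="Rome is the capital of Italy.")])

    # Placeholder prompt template: retrieved documents are injected above the question.
    template = """Given these documents, answer the question.
    {% for doc in documents %}{{ doc.content }}
    {% endfor %}
    Question: {{ question }}
    Answer:"""

    rag = Pipeline()
    rag.add_component("retriever", InMemoryBM25Retriever(document_store=document_store))
    rag.add_component("prompt_builder", PromptBuilder(template=template))
    rag.add_component("llm", OpenAIGenerator())  # expects OPENAI_API_KEY in the environment

    # Connect output sockets to input sockets: documents flow into the prompt, the prompt into the LLM.
    rag.connect("retriever.documents", "prompt_builder.documents")
    rag.connect("prompt_builder.prompt", "llm.prompt")

    question = "What is the capital of Italy?"
    result = rag.run({"retriever": {"query": question}, "prompt_builder": {"question": question}})
    print(result["llm"]["replies"][0])

The posts summarized above build this up step by step and swap the in-memory store and retriever for production-grade ones; the sketch only shows how components are added and connected.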
+ + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to this date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Why rewriting Haystack?! + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first step outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of a Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. 
My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise at the presence of a preview module in the latest Haystack 1. + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF) + https://www.zansara.dev/publications/thpv042/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/publications/thpv042/ + Abstract Link to heading The CERN accelerators require a large number of instruments, measuring different beam parameters like position, losses, current etc. The instruments’ associated electronics and software also produce information about their status. All these data are stored in a database for later analysis. The Beam Instrumentation group developed the Offline Analysis Framework some years ago to regularly and systematically analyze these data. The framework has been successfully used for nearly 100 different analyses that ran regularly by the end of the LHC run 2. 
+ + + My Dotfiles + https://www.zansara.dev/posts/2021-12-11-dotfiles/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/posts/2021-12-11-dotfiles/ + GitHub Repo: https://github.com/ZanSara/dotfiles +What Linux developer would I be if I didn&rsquo;t also have my very own dotfiles repo? +After many years of iterations I finally found a combination that lasted quite a while, so I figured it&rsquo;s time to treat them as a real project. It was originally optimized for my laptop, but then I realized it works quite well on my three-monitor desk setup as well without major issues. + + + ZanzoCam: An open-source alpine web camera + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Slides: ZanzoCam: An open-source alpine web camera +On May 24th 2021 I held a talk about the ZanzoCam project as invited speaker for the &ldquo;Hardware and Software Codesign&rdquo; course at Università di Pavia. +The slides go through the entire lifecycle of the ZanzoCam project, from the very inception of it, the market research, our decision process, earlier prototypes, and then goes into a more detailed explanation of the the design and implementation of the project from a hardware and software perspective, with some notes about our financial situation and project management. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + ZanzoCam + https://www.zansara.dev/projects/zanzocam/ + Wed, 01 Jan 2020 00:00:00 +0000 + https://www.zansara.dev/projects/zanzocam/ + Main website: https://zanzocam.github.io/ +ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices on their affiliate huts. +ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. 
+ + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + About + https://www.zansara.dev/about/ + Mon, 01 Jan 0001 00:00:00 +0000 + https://www.zansara.dev/about/ + I am a Python and LLMs specialist currently working for deepset, a German startup working on NLP since &ldquo;before it was cool&rdquo;. At the moment I&rsquo;m the #1 contributor of Haystack, their open-source framework for building highly customizable, production-ready NLP and LLM applications. +Previously I have been working at CERN, where I began my software engineering career. During my time there I had the privilege of driving one major decision to migrate the graphical interface&rsquo;s software of the accelerator&rsquo;s control systems from Java to PyQt, and then of helping a client department migrate to this stack. + + + diff --git a/js/coder.min.6ae284be93d2d19dad1f02b0039508d9aab3180a12a06dcc71b0b0ef7825a317.js b/js/coder.min.6ae284be93d2d19dad1f02b0039508d9aab3180a12a06dcc71b0b0ef7825a317.js new file mode 100644 index 00000000..bbecf345 --- /dev/null +++ b/js/coder.min.6ae284be93d2d19dad1f02b0039508d9aab3180a12a06dcc71b0b0ef7825a317.js @@ -0,0 +1 @@ +const body=document.body,darkModeToggle=document.getElementById("dark-mode-toggle"),darkModeMediaQuery=window.matchMedia("(prefers-color-scheme: dark)");localStorage.getItem("colorscheme")?setTheme(localStorage.getItem("colorscheme")):setTheme(body.classList.contains("colorscheme-light")||body.classList.contains("colorscheme-dark")?body.classList.contains("colorscheme-dark")?"dark":"light":darkModeMediaQuery.matches?"dark":"light"),darkModeToggle&&darkModeToggle.addEventListener("click",()=>{let e=body.classList.contains("colorscheme-dark")?"light":"dark";setTheme(e),rememberTheme(e)}),darkModeMediaQuery.addListener(e=>{setTheme(e.matches?"dark":"light")}),document.addEventListener("DOMContentLoaded",function(){let e=document.querySelector(".preload-transitions");e.classList.remove("preload-transitions")});function setTheme(e){body.classList.remove("colorscheme-auto");let n=e==="dark"?"light":"dark";body.classList.remove("colorscheme-"+n),body.classList.add("colorscheme-"+e),document.documentElement.style["color-scheme"]=e;function t(e){return new Promise(t=>{if(document.querySelector(e))return t(document.querySelector(e));const n=new MutationObserver(s=>{document.querySelector(e)&&(t(document.querySelector(e)),n.disconnect())});n.observe(document.body,{childList:!0,subtree:!0})})}if(e==="dark"){const e={type:"set-theme",theme:"github-dark"};t(".utterances-frame").then(t=>{t.contentWindow.postMessage(e,"https://utteranc.es")})}else{const e={type:"set-theme",theme:"github-light"};t(".utterances-frame").then(t=>{t.contentWindow.postMessage(e,"https://utteranc.es")})}function s(e){const 
t=document.querySelector("iframe.giscus-frame");if(!t)return;t.contentWindow.postMessage({giscus:e},"https://giscus.app")}s({setConfig:{theme:e}});const o=new Event("themeChanged");document.dispatchEvent(o)}function rememberTheme(e){localStorage.setItem("colorscheme",e)} \ No newline at end of file diff --git a/static/me/avatar.jpeg b/me/avatar.jpeg similarity index 100% rename from static/me/avatar.jpeg rename to me/avatar.jpeg diff --git a/static/me/sara_zanzottera_cv.pdf b/me/sara_zanzottera_cv.pdf similarity index 100% rename from static/me/sara_zanzottera_cv.pdf rename to me/sara_zanzottera_cv.pdf diff --git a/static/posts/2021-12-11-dotfiles/cover.png b/posts/2021-12-11-dotfiles/cover.png similarity index 100% rename from static/posts/2021-12-11-dotfiles/cover.png rename to posts/2021-12-11-dotfiles/cover.png diff --git a/posts/2021-12-11-dotfiles/index.html b/posts/2021-12-11-dotfiles/index.html new file mode 100644 index 00000000..d86c8b0a --- /dev/null +++ b/posts/2021-12-11-dotfiles/index.html @@ -0,0 +1,287 @@ + + + + + + My Dotfiles · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + My Dotfiles + +

+
+ +
+ +
+ + Featured image + +

GitHub Repo: https://github.com/ZanSara/dotfiles

+
+

What Linux developer would I be if I didn’t also have my very own dotfiles repo?

+

After many years of iterations I finally found a combination that lasted quite a while, so I figured it’s time to treat them as a real project. It was originally optimized for my laptop, but then I realized it works quite well on my three-monitor desk setup as well without major issues.

+

It sports:

+
    +
  • i3-wm as window manager (of course, with gaps),
  • +
  • The typical trio of polybar, rofi and dunst to handle top bar, start menu and notifications respectively,
  • +
  • The odd choice of Ly as my display manager. I just love the minimal, TUI aesthetics of it. Don’t forget to enable Doom’s flames!
  • +
  • A minimalistic animated background from xscreensaver, Grav. It’s configured to leave no trails and stay black and white. An odd choice, and yet it manages to use no resources, stay very minimal, and bring a very (in my opinion) futuristic look to the entire setup.
  • +
  • OhMyBash with the font theme,
  • +
  • Other small amenities, like nmtui for network management, Japanese-numerals as workspace indicators, etc.
  • +
+

Feel free to take what you like. If you end up using any of these, make sure to share the outcomes!

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-09-10-python-verbix-sdk/cover.png b/posts/2023-09-10-python-verbix-sdk/cover.png similarity index 100% rename from static/posts/2023-09-10-python-verbix-sdk/cover.png rename to posts/2023-09-10-python-verbix-sdk/cover.png diff --git a/posts/2023-09-10-python-verbix-sdk/index.html b/posts/2023-09-10-python-verbix-sdk/index.html new file mode 100644 index 00000000..da4acfb7 --- /dev/null +++ b/posts/2023-09-10-python-verbix-sdk/index.html @@ -0,0 +1,323 @@ + + + + + + An (unofficial) Python SDK for Verbix · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + An (unofficial) Python SDK for Verbix + +

+
+ +
+ +
+ + Featured image + +

PyPI package: https://pypi.org/project/verbix-sdk/

+

GitHub Repo: https://github.com/ZanSara/verbix-sdk

+

Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md

+
+

As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were “simple”:

+
    +
  • Supports many languages, including Italian, Portuguese and Hungarian
  • +
  • Conjugates irregulars properly
  • +
  • Offers an API access to the conjugation tables
  • +
  • Refuses to conjugate anything except for known verbs
  • +
  • (Optional) Highlights the irregularities in some way
  • +
+

Surprisingly, there seems to be a shortage of good alternatives in this field. None of the websites that host polished conjugation data seem to offer API access (looking at you, Reverso – you’ll get your own post one day), and most of the simpler ones use heuristics to conjugate, which makes them very prone to errors. So for now I ended up choosing Verbix to start from.

+

Unfortunately the website doesn’t inspire much confidence. I attempted to email the creator, only to see them close their email account a while later; an update to their API seems to have stalled half-way, and the blog seems dead. I often have the feeling this site might go under any minute, as soon as their domain registration expires.

+

But there are pros to it, as long as it lasts. Verbix offers verb conjugation and noun declension tables for some very niche languages, dialects and conlangs, to a degree that many other, more popular websites don’t even come close to. To support such variety they use heuristics to create the conjugation tables, which is not ideal: for Hungarian, for example, I could easily get it to conjugate verbs that don’t exist or that contain spelling mistakes. On the other hand, their API does have a field that says whether the verb is known or not, which is a great way to filter out false positives.

+

So I decided to go the extra mile and I wrote a small Python SDK for their API: verbix-sdk. Enjoy it while it lasts…

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-10-10-haystack-series-intro/cover.png b/posts/2023-10-10-haystack-series-intro/cover.png similarity index 100% rename from static/posts/2023-10-10-haystack-series-intro/cover.png rename to posts/2023-10-10-haystack-series-intro/cover.png diff --git a/posts/2023-10-10-haystack-series-intro/index.html b/posts/2023-10-10-haystack-series-intro/index.html new file mode 100644 index 00000000..8afb0260 --- /dev/null +++ b/posts/2023-10-10-haystack-series-intro/index.html @@ -0,0 +1,312 @@ + + + + + + Haystack 2.0: What is it? · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Haystack 2.0: What is it? + +

+
+ +
+ +
+ + Featured image + +

December is finally approaching, and with it the release of Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve iterated on the core concepts for what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline.

+

But what is it that makes this release so special?

+

In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve gone through the entire 100,000+ lines of the codebase and redone everything in under a year. For our small team, this is a huge accomplishment.

+

In this series, I want to explain what Haystack 2 is from the perspective of the team that developed it. I’m gonna talk about what makes the new Pipeline so different from the old one, how to use new components and features, how these compare with their equivalents in Haystack 1 (when possible), and the principles that guided the redesign. I had the pleasure (and sometimes the burden) of being involved in nearly all aspects of this process, from the requirements definition to the release, and I drove many of them through several iterations. In these posts, you can expect a mix of technical details and some diversions on the history and rationale behind each decision, as I’ve seen and understood them.

+

For the curious readers, we have already released a lot of information about Haystack 2.0: check out this GitHub Discussion, or join us on Haystack’s Discord server and peek into the haystack-2.0 channel for regular updates. We are also slowly building brand new documentation for everything, and don’t worry: we’ll make sure it’s as outstanding as the Haystack 1.x version is.

+

We also regularly showcase 2.0 features in our Office Hours on Discord. Follow @Haystack_AI or @deepset_ai on Twitter to stay up-to-date, or deepset on LinkedIn. And you’ll find me and the rest of the team on GitHub frantically (re)writing code and filing down the rough edges before the big release.

+

Stay tuned!

+
+

Next: Why rewriting Haystack?!

+

See the entire series here: Haystack 2.0 series

+ +
+ + +
+ + +
+ + + + + +
+ + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-10-11-haystack-series-why/cover.png b/posts/2023-10-11-haystack-series-why/cover.png similarity index 100% rename from static/posts/2023-10-11-haystack-series-why/cover.png rename to posts/2023-10-11-haystack-series-why/cover.png diff --git a/posts/2023-10-11-haystack-series-why/index.html b/posts/2023-10-11-haystack-series-why/index.html new file mode 100644 index 00000000..6b80422f --- /dev/null +++ b/posts/2023-10-11-haystack-series-why/index.html @@ -0,0 +1,370 @@ + + + + + + Why rewriting Haystack?! · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Why rewriting Haystack?! + +

+
+ +
+ +
+ + Featured image + +

Before even diving into what Haystack 2.0 is, how it was built, and how it works, let’s spend a few words about the whats and the whys.

+

First of all, what is Haystack?

+

And next, why on Earth did we decide to rewrite it from the ground up?

+

+ A Pioneer Framework + + + Link to heading + +

+

Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first steps outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. We were proud to enable use cases such as semantic search, FAQ matching, document similarity, document summarization, machine translation, language-agnostic search, and so on.

+

The field was niche but constantly moving, and research was lively. The BERT paper had been published a few months before Haystack’s first release, unlocking a small revolution. In the shade of much larger research labs, deepset, then just a pre-seed stage startup, was also pouring effort into research and model training.

+

In those times, competition was close to non-existent. The field was still quite technical, and most people didn’t fully understand its potential. We were free to explore features and use cases at our own pace and set the direction for our product. This allowed us to decide what to work on, what to double down on, and what to deprioritize, postpone, or ignore. Haystack was nurturing its own garden in what was fundamentally a green field.

+

+ ChatGPT + + + Link to heading + +

+

This rather idyllic situation came to an end all too abruptly at the end of November 2022, when ChatGPT was released.

+

For us in the NLP field, everything seemed to change overnight. Day by day. For months.

+

The speed of progress went from lively to faster-than-light all at once. Every company with the budget to train an LLM seemed to be doing so, and researchers kept releasing new models just as quickly. Open-source contributors pushed to reduce the hardware requirements for inference lower and lower. My best memory of those times is the drama of LLaMA’s first “release”: I remember betting on March 2nd that within a week I would be running LLaMA models on my laptop, and I wasn’t even surprised when my prediction turned out to be true with the release of llama.cpp on March 10th.

+

Of course, keeping up with this situation was far beyond us. Competitors started to spawn like mushrooms, and our space was quickly crowded with new startups, far more agile and aggressive than us. We suddenly needed to compete and realized we weren’t used to it.

+

+ PromptNode vs FARMReader + + + Link to heading + +

+

Luckily, Haystack seemed capable of keeping up, at least for a while. Thanks to the efforts of Vladimir Blagojevic, a few weeks after ChatGPT became a sensation, we added some decent support for LLMs in the form of PromptNode. Our SaaS team could soon bring new LLM-powered features to our customers. We even managed to add support for Agents, another hot topic in the wake of ChatGPT.

+

However, in the minds of most developers, the go-to library for LLMs was not Haystack. It was LangChain, and for a long time, it seemed like we would never be able to challenge its status and popularity. Everyone was talking about it, everyone was building demos, products, and startups on it, its development speed was unbelievable and, in the day-to-day discourse of the newly born LLM community, Haystack was nowhere to be found.

+

Why?

+

That’s because no one even realized that Haystack, the semantic search framework from 2019, also supported LLMs. All our documentation, tutorials, blog posts, research efforts, models on HuggingFace, everything was pointing towards semantic search. LLMs were nowhere to be seen.

+

And semantic search was going down fast.

+

Reader Models downloads graph

+

The image above shows today’s monthly downloads for one of deepset’s most successful models on HuggingFace, +deepset/roberta-base-squad2. This model performs extractive Question Answering, our former primary use case before the release of ChatGPT. Even with more than one and a half million downloads monthly, this model is experiencing a disastrous collapse in popularity, and in the current landscape, it is unlikely to ever recover.

+

+ A (Sort Of) Pivot + + + Link to heading + +

+

In this context, around February 2023, we decided to bet on the rise of LLMs and committed to focusing all our efforts on becoming the #1 framework powering production-grade LLM applications.

+

As we quickly realized, this was by far not an easy proposition. Extractive QA was not only ingrained deeply in our public image but in our codebase as well: implementing and maintaining PromptNode was proving more and more painful by the day, and when we tried to fit the concept of Agents into Haystack, it felt uncomfortably like trying to force a square peg into a round hole.

+

Haystack pipelines made extractive QA straightforward for the users and were highly optimized for this use case. But supporting LLMs was nothing like enabling extractive QA. Using Haystack for LLMs was quite a painful experience, and at the same time, modifying the Pipeline class to accommodate them seemed like the best way to mess with all the users that relied on the current Pipeline for their existing, value-generating applications. Making mistakes with Pipeline could ruin us.

+

With this realization in mind, we took what seemed the best option for the future of Haystack: a rewrite. The knowledge and experience we gained while working on Haystack 1 could fuel the design of Haystack 2 and act as a reference frame for it. Unlike our competitors, we already knew a lot about how to make NLP work at scale. We made many mistakes we would avoid in our next iteration. We knew that focusing on the best possible developer experience fueled the growth of Haystack 1 in the early days, and we were committed to doing the same for the next version of it.

+

So, the redesign of Haystack started, and it started from the concept of Pipeline.

+

+ Fast-forward + + + Link to heading + +

+

Haystack 2.0 hasn’t been released yet, but for now, it seems that we have made the right decision at the start of the year.

+

Haystack’s name is starting to appear more often in discussions around LLMs. The general tone of the community is steadily shifting, and scaling up, rather than experimenting, is now the focus. Competitors are re-orienting themselves toward production-readiness, something we’re visibly more experienced with. At the same time, LangChain is becoming a victim of its own success, collecting more and more criticism for its lack of documentation, leaky abstractions, and confusing architecture. Other competitors are gaining steam, but the overall landscape no longer feels as hostile.

+

In the next post, I will explore the technical side of Haystack 2.0 and delve deeper into the concept of Pipelines: what they are, how to use them, how they evolved from Haystack 1 to Haystack 2, and why.

+
+

Next: Haystack’s Pipeline - A Deep Dive

+

Previous: Haystack 2.0: What is it?

+

See the entire series here: Haystack 2.0 series

+ +
+ + +
+ + +
+ + + + + +
+ + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-10-11-haystack-series-why/reader-model-downloads.png b/posts/2023-10-11-haystack-series-why/reader-model-downloads.png similarity index 100% rename from static/posts/2023-10-11-haystack-series-why/reader-model-downloads.png rename to posts/2023-10-11-haystack-series-why/reader-model-downloads.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/all-branching-pipelines.png b/posts/2023-10-15-haystack-series-pipeline/all-branching-pipelines.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/all-branching-pipelines.png rename to posts/2023-10-15-haystack-series-pipeline/all-branching-pipelines.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/branching-query-pipelines.png b/posts/2023-10-15-haystack-series-pipeline/branching-query-pipelines.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/branching-query-pipelines.png rename to posts/2023-10-15-haystack-series-pipeline/branching-query-pipelines.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/cover.png b/posts/2023-10-15-haystack-series-pipeline/cover.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/cover.png rename to posts/2023-10-15-haystack-series-pipeline/cover.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/filetypeclassifier-docs.png b/posts/2023-10-15-haystack-series-pipeline/filetypeclassifier-docs.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/filetypeclassifier-docs.png rename to posts/2023-10-15-haystack-series-pipeline/filetypeclassifier-docs.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/hybrid-retrieval.png b/posts/2023-10-15-haystack-series-pipeline/hybrid-retrieval.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/hybrid-retrieval.png rename to posts/2023-10-15-haystack-series-pipeline/hybrid-retrieval.png diff --git a/posts/2023-10-15-haystack-series-pipeline/index.html b/posts/2023-10-15-haystack-series-pipeline/index.html new file mode 100644 index 00000000..21d53f9c --- /dev/null +++ b/posts/2023-10-15-haystack-series-pipeline/index.html @@ -0,0 +1,617 @@ + + + + + + Haystack's Pipeline - A Deep Dive · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Haystack's Pipeline - A Deep Dive + +

+
+ +
+ +
+ + Featured image + +

If you’ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us?

+

In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. This deep dive into the current state of the framework is also a premise for the next episode, where I will explain how Haystack 2.0 addresses this version’s shortcomings.

+

If you think you already know how Haystack Pipelines work, give this post a chance: I might manage to change your mind.

+

+ A Bit Of History + + + Link to heading + +

+

Interestingly, in the very first releases of Haystack, Pipelines were not a thing. Version 0.1.0 was released with a simpler object, the Finder, that did little more than gluing together a Retriever and a Reader, the two fundamental building blocks of a semantic search application.
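For reference, this is roughly what a Finder-based application looked like back then. Treat it as a sketch reconstructed from memory of the early 0.x tutorials, so the exact parameter names are an assumption; reader and retriever are assumed to be already initialized.

from haystack import Finder

# Glue a Retriever and a Reader together...
finder = Finder(reader=reader, retriever=retriever)
# ...and ask a question against the indexed documents.
prediction = finder.get_answers(
    question="What did Einstein work on?",
    top_k_retriever=10,
    top_k_reader=5,
)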

+

In the next few months, however, the capabilities of language models expanded to enable many more use cases. One hot topic was hybrid retrieval: a system composed of two different Retrievers, an optional Ranker, and an optional Reader. This kind of application clearly didn’t fit the Finder’s design, so in version 0.6.0 the Pipeline object was introduced: a new abstraction that helped users build applications as a graph of components.

+

Pipeline’s API was a huge step forward from Finder. It instantly enabled seemingly endless combinations of components, unlocked almost all use cases conceivable, and became a foundational Haystack concept meant to stay for a very long time. In fact, the API offered by the first version of Pipeline has changed very little since its initial release.

+

This is the snippet included in the release notes of version 0.6.0 to showcase hybrid retrieval. Does it look familiar?

+
p = Pipeline()
+p.add_node(component=es_retriever, name="ESRetriever", inputs=["Query"])
+p.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["Query"])
+p.add_node(component=JoinDocuments(join_mode="concatenate"), name="JoinResults", inputs=["ESRetriever", "DPRRetriever"])
+p.add_node(component=reader, name="QAReader", inputs=["JoinResults"])
+res = p.run(query="What did Einstein work on?", top_k_retriever=1)
+

+ A Powerful Abstraction + + + Link to heading + +

+

One fascinating aspect of this Pipeline model is the simplicity of its user-facing API. In almost all examples, you see only two or three methods used:

+
    +
  • add_node: to add a component to the graph and connect it to the others.
  • +
  • run: to run the Pipeline from start to finish.
  • +
  • draw: to draw the graph of the Pipeline to an image.
  • +
+
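The first two appear in the snippet above; draw is the one producing the pipeline diagrams shown throughout this post. A minimal sketch, assuming the p object from the hybrid retrieval snippet (the path argument name is how I recall the 1.x API, so consider it an assumption):

# Render the graph of the pipeline to an image file.
p.draw(path="hybrid_retrieval.png")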

At this level, users don’t need to know what kind of data the components need to function, what they produce, or even what the components do: all they need to know is the place they must occupy in the graph for the system to work.

+

For example, as long as the users know that their hybrid retrieval pipeline should look more or less like this (note: this is the output of Pipeline.draw()), translating it into a Haystack Pipeline object using a few add_node calls is mostly straightforward.

+

Hybrid Retrieval

+

This fact is reflected by the documentation of the various components as well. For example, this is how the documentation page for Ranker opens:

+

Ranker Documentation

+

Note how the first piece of information about this component is where to place it. Right after, it specifies its inputs and outputs, even though it’s not immediately clear why we need this information, and then it lists which specific classes can cover the role of a Ranker.

+

The message is clear: all Ranker classes are functionally interchangeable, and as long as you place them correctly in the Pipeline, they will fulfill the function of Ranker as you expect them to. Users don’t need to understand what distinguishes CohereRanker from RecentnessReranker unless they want to: the documentation promises that you can swap them safely, and thanks to the Pipeline abstraction, this statement mostly holds true.
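As a small sketch of that promise: swapping the reranking model is just a different constructor call, while the add_node wiring stays untouched. The model name below is only illustrative, and the p object and the "JoinResults" node are assumed from the 0.6.0 snippet above.

from haystack.nodes import SentenceTransformersRanker

# One concrete Ranker; any other Ranker subclass could be instantiated
# here instead, and the add_node call below would not change.
ranker = SentenceTransformersRanker(
    model_name_or_path="cross-encoder/ms-marco-MiniLM-L-6-v2"
)
p.add_node(component=ranker, name="Ranker", inputs=["JoinResults"])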

+

+ Ready-made Pipelines + + + Link to heading + +

+

But how can the users know which sort of graph they have to build?

+

Most NLP applications are made of a relatively limited number of high-level components: Retrievers, Readers, Rankers, plus the occasional Classifier, Translator, or Summarizer. Systems requiring something more than these components used to be really rare, at least when talking about “query” pipelines (more on this later).

+

Therefore, at this level of abstraction, there are just a few graph topologies possible. Better yet, they could each be mapped to high-level use cases such as semantic search, language-agnostic document search, hybrid retrieval, and so on.

+

But the crucial point is that, in most cases, tailoring the application did not require any changes to the graph’s shape. Users only needed to identify their use case, find an example or a tutorial defining the shape of the Pipeline they needed, and then swap the individual components with other instances from the same category until they found the best combination for their exact requirements.

+

This workflow was evident and encouraged: it was the philosophy behind Finder as well, and from version 0.6.0, Haystack immediately provided what are called “Ready-made Pipelines”: objects that initialized the graph on the user’s behalf and expected as input the components to place at each point of the graph: for example, a Reader and a Retriever in the case of simple Extractive QA.

+

With this further abstraction on top of Pipeline, creating an NLP application became an action that doesn’t even require the user to be aware of the existence of the graph. In fact:

+
pipeline = ExtractiveQAPipeline(reader, retriever)
+

is enough to get your Extractive QA applications ready to answer your questions. And you can do so with just another line.

+
answers = pipeline.run(query="What did Einstein work on?")
+

+ “Flexibility powered by DAGs” + + + Link to heading + +

+

This abstraction is extremely powerful for the use cases that it was designed for. There are a few layers of ease of use vs. customization the user can choose from depending on their expertise, which help them progress from a simple ready-made Pipeline to fully custom graphs.

+

However, the focus was so much on the initial stages of the user’s journey that power users’ needs were sometimes forgotten. Such issues didn’t show up immediately, but they quickly added friction as soon as the users tried to customize their system beyond the examples from the tutorials and the documentation.

+

For an example of these issues, let’s talk about pipelines with branches. Here are two small, apparently very similar pipelines.

+

Query Classification vs Hybrid Retrieval

+

The first Pipeline represents the Hybrid Retrieval use case we’ve met with before. Here, the Query node sends its outputs to both retrievers, and they both produce some output. For the Reader to make sense of this data, we need a Join node that merges the two lists into one and a Ranker that takes the lists and sorts them again by similarity to the query. Ranker then sends the rearranged list to the Reader.

+

The second Pipeline instead performs a simpler form of Hybrid Retrieval. Here, the Query node sends its outputs to a Query Classifier, which then triggers only one of the two retrievers, the one that is expected to perform better on it. The triggered Retriever then sends its output directly to the Reader, which doesn’t need to know which Retriever the data comes from. So, in this case, we don’t need the Join node.

+

The two pipelines are built as you would expect, with a bunch of add_node calls. You can even run them with the exact same code, which is the same code needed for every other Pipeline we’ve seen so far.

+
pipeline_1 = Pipeline()
+pipeline_1.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Query"])
+pipeline_1.add_node(component=dense_retriever, name="DenseRetriever", inputs=["Query"])
+pipeline_1.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"])
+pipeline_1.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"])
+pipeline_1.add_node(component=reader, name="Reader", inputs=["Ranker"])
+
+answers = pipeline_1.run(query="What did Einstein work on?")
+
pipeline_2 = Pipeline()
+pipeline_2.add_node(component=query_classifier, name="QueryClassifier", inputs=["Query"])
+pipeline_2.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["QueryClassifier.output_1"])
+pipeline_2.add_node(component=dense_retriever, name="DenseRetriever", inputs=["QueryClassifier.output_2"])
+pipeline_2.add_node(component=reader, name="Reader", inputs=["SparseRetriever", "DenseRetriever"])
+
+answers = pipeline_2.run(query="What did Einstein work on?")
+

Both pipelines run as you would expect them to. Hooray! Pipelines can branch and join!

+

Now, let’s take the first Pipeline and customize it further.

+

For example, imagine we want to expand language support to include French. The dense Retriever has no issues handling several languages as long as we select a multilingual model; however, the sparse Retriever needs the keywords to match, so we must translate the queries to English to find some relevant documents in our English-only knowledge base.

+

Here is what the Pipeline ends up looking like. Language Classifier sends all French queries over output_1 and all English queries over output_2. In this way, the query passes through the Translator node only if it is written in French.

+

Multilingual Hybrid Retrieval

+
pipeline = Pipeline()
+pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"])
+pipeline.add_node(component=translator, name="Translator", inputs=["LanguageClassifier.output_1"])
+pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Translator", "LanguageClassifier.output_2"])
+pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["LanguageClassifier.output_1", "LanguageClassifier.output_2"])
+pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"])
+pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"])
+pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"])
+

But… wait. Let’s look again at the graph and at the code. DenseRetriever should receive two inputs from Language Classifier: both output_1 and output_2, because it can handle both languages. What’s going on? Is this a bug in draw()?

+

Thanks to the debug=True parameter of Pipeline.run(), we start inspecting what each node saw during the execution, and we realize quickly that our worst fears are true: this is a bug in the Pipeline implementation. The underlying library powering the Pipeline’s graphs takes the definition of Directed Acyclic Graphs very seriously and does not allow two nodes to be connected by more than one edge. There are, of course, other graph classes supporting this case, but Haystack happens to use the wrong one.
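A minimal sketch of the limitation, using networkx directly (which is, to the best of my knowledge, the library Haystack 1.x builds its graphs on): a plain DiGraph silently collapses parallel edges between the same two nodes, while a MultiDiGraph would keep them.

import networkx as nx

# A plain directed graph: the second edge between the same two nodes
# simply overwrites the first one, and no error is raised.
g = nx.DiGraph()
g.add_edge("LanguageClassifier", "DenseRetriever", label="output_1")
g.add_edge("LanguageClassifier", "DenseRetriever", label="output_2")
print(g.number_of_edges())   # 1

# A multigraph would have kept both edges.
mg = nx.MultiDiGraph()
mg.add_edge("LanguageClassifier", "DenseRetriever", label="output_1")
mg.add_edge("LanguageClassifier", "DenseRetriever", label="output_2")
print(mg.number_of_edges())  # 2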

+

Interestingly, Pipeline doesn’t even notice the problem and does not fail. It runs as the drawing suggests: when the query happens to be in French, only the sparse Retriever will process it.

+

Clearly, this is not good for us.

+

Well, let’s look for a workaround. Given that we’re Haystack power users by now, we realize that we can use a Join node with a single input as a “no-op” node. If we put it along one of the edges, that edge won’t directly connect Language Classifier and Dense Retriever, so the bug should be solved.

+

So here is our current Pipeline:

+

Multilingual Hybrid Retrieval with No-Op Joiner

+
pipeline = Pipeline()
+pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"])
+pipeline.add_node(component=translator, name="Translator", inputs=["LanguageClassifier.output_1"])
+pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Translator", "LanguageClassifier.output_2"])
+pipeline.add_node(component=no_op_join, name="NoOpJoin", inputs=["LanguageClassifier.output_1"])
+pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["NoOpJoin", "LanguageClassifier.output_2"])
+pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"])
+pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"])
+pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"])
+

Great news: the Pipeline now runs as we expect! However, when we run a French query, the results are better but still surprisingly bad.

+

What now? Is the dense Retriever still not running? Is the Translator node doing a poor job?

+

Some debugging later, we realize that the Translator is amazingly good and the Retrievers are both running. But we forgot another piece of the puzzle: Ranker needs the query to be in the same language as the documents. It requires the English version of the query, just like the sparse Retriever does. However, right now, it receives the original French query, and that’s the reason for the lack of performance. We soon realize that the same is true for the Reader as well.

+

So… how does the Pipeline pass the query down to the Ranker?

+

Until this point, we didn’t need to know how exactly values are passed from one component to the next. We didn’t need to care about their inputs and outputs at all: Pipeline was doing all this dirty work for us. Suddenly, we need to tell the Pipeline which query to pass to the Ranker and we have no idea how to do that.

+

Worse yet. There is no way to reliably do that. The documentation seems to blissfully ignore the topic, the docstrings give us no pointers, and looking at the routing code of Pipeline we quickly get dizzy and give up. We dig through the Pipeline API several times until we’re confident that there’s nothing that can help.

+

Well, there must be at least some workaround. Maybe we can forget about this issue by rearranging the nodes.

+

One easy way out is to translate the query for both retrievers instead of only for the sparse one. This solution also eliminates the NoOpJoin node we introduced earlier, so it doesn’t sound too bad.

+

The Pipeline looks like this now.

+

Multilingual Hybrid Retrieval with two Translators

+
pipeline = Pipeline()
+pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"])
+pipeline.add_node(component=translator, name="Translator", inputs=["LanguageClassifier.output_1"])
+pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Translator", "LanguageClassifier.output_2"])
+pipeline.add_node(component=translator_2, name="Translator2", inputs=["LanguageClassifier.output_1"])
+pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["Translator2", "LanguageClassifier.output_2"])
+pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"])
+pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"])
+pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"])
+

We now have two nodes that contain identical translator components. Given that they are stateless, we can surely place the same instance in both places, with different names, and avoid doubling its memory footprint just to work around a couple of Pipeline bugs. After all, Translator nodes use relatively heavy models for machine translation.

+

This is what Pipeline replies as soon as we try.

+
PipelineConfigError: Cannot add node 'Translator2'. You have already added the same 
+instance to the Pipeline under the name 'Translator'.
+

Okay, so it seems like we can’t re-use components in two places: there is an explicit check against this, for some reason. Alright, let’s rearrange this Pipeline again with this new constraint in mind.

+

How about we first translate the query and then distribute it?

+

Multilingual Hybrid Retrieval, translate-and-distribute

+
pipeline = Pipeline()
+pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"])
+pipeline.add_node(component=translator, name="Translator", inputs=["LanguageClassifier.output_1"])
+pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Translator", "LanguageClassifier.output_2"])
+pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["Translator", "LanguageClassifier.output_2"])
+pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"])
+pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinDocuments"])
+pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"])
+

Looks neat: there is now no way for the original French query to reach Ranker. Right?

+

We run the pipeline again and soon realize that nothing has changed. The query received by Ranker is still in French, untranslated. Shuffling the order of the add_node calls and the names of the components in the inputs parameters seems to have no effect on the graph. We even try to connect Translator directly with Ranker in a desperate attempt to forward the correct value, but Pipeline now starts throwing obscure, apparently meaningless error messages like:

+
BaseRanker.run() missing 1 required positional argument: 'documents'
+

Isn’t Ranker receiving the documents from JoinDocuments? Where did they go?

+

Having wasted far too much time on this relatively simple Pipeline, we throw in the towel, go to Haystack’s Discord server, and ask for help.

+

Soon enough, one of the maintainers shows up and promises a workaround ASAP. We’re skeptical at this point, but the workaround, in fact, exists.

+

It’s just not very pretty.

+

Multilingual Hybrid Retrieval, working version

+
pipeline = Pipeline()
+pipeline.add_node(component=language_classifier, name="LanguageClassifier", inputs=["Query"])
+pipeline.add_node(component=translator_workaround, name="TranslatorWorkaround", inputs=["LanguageClassifier.output_2"])
+pipeline.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["LanguageClassifier.output_1", "TranslatorWorkaround"])
+pipeline.add_node(component=dense_retriever, name="DenseRetriever", inputs=["LanguageClassifier.output_1", "TranslatorWorkaround"])
+pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["SparseRetriever", "DenseRetriever"])
+pipeline.add_node(component=join_query_workaround, name="JoinQueryWorkaround", inputs=["TranslatorWorkaround", "JoinDocuments"])
+pipeline.add_node(component=rerank, name="Ranker", inputs=["JoinQueryWorkaround"])
+pipeline.add_node(component=reader, name="Reader", inputs=["Ranker"])
+

Note that you need two custom nodes: a wrapper for the Translator and a brand-new Join node.

+
class TranslatorWorkaround(TransformersTranslator):
+
+    outgoing_edges = 1
+
+    def run(self, query):
+        results, edge = super().run(query=query)
+        return {**results, "documents": [] }, "output_1"
+
+    def run_batch(self, queries):
+        pass
+
+
+class JoinQueryWorkaround(JoinNode):
+
+    def run_accumulated(self, inputs, *args, **kwargs):
+        return {"query": inputs[0].get("query", None), "documents": inputs[1].get("documents", None)}, "output_1"
+
+    def run_batch_accumulated(self, inputs):
+        pass
+

Along with this beautiful code, we also receive an explanation about how the JoinQueryWorkaround node works only for this specific Pipeline and is pretty hard to generalize, which is why it’s not present in Haystack right now. I’ll spare you the details: you will have an idea why by the end of this journey.

+

Wanna play with this Pipeline yourself and try to make it work in another way? Check out the Colab or the gist and have fun.

+

Having learned only that it’s better not to implement unusual branching patterns with Haystack unless you’re ready for a fight, let’s now turn to the indexing side of your application. We’ll stick to the basics this time.

+

+ Indexing Pipelines + + + Link to heading + +

+

Indexing pipelines’ main goal is to transform files into Documents from which a query pipeline can later retrieve information. They mostly look like the following.

+

Indexing Pipeline

+

And the code looks just like you would expect.

+
pipeline = Pipeline()
+pipeline.add_node(component=file_type_classifier, name="FileTypeClassifier", inputs=["File"])
+pipeline.add_node(component=text_converter, name="TextConverter", inputs=["FileTypeClassifier.output_1"])
+pipeline.add_node(component=pdf_converter, name="PdfConverter", inputs=["FileTypeClassifier.output_2"])
+pipeline.add_node(component=docx_converter, name="DocxConverter", inputs=["FileTypeClassifier.output_4"])
+pipeline.add_node(component=join_documents, name="JoinDocuments", inputs=["TextConverter", "PdfConverter", "DocxConverter"])
+pipeline.add_node(component=preprocessor, name="Preprocessor", inputs=["JoinDocuments"])
+pipeline.add_node(component=document_store, name="DocumentStore", inputs=["Preprocessor"])
+
+pipeline.run(file_paths=paths)
+

There is nothing surprising here. The starting node is File instead of Query, which seems logical given that this Pipeline expects a list of files, not a query. There is a document store at the end, which we didn’t use in query pipelines so far, but it doesn’t look too strange. It’s all quite intuitive.

+

Indexing pipelines are run by giving them the paths of the files to convert. In this scenario, more than one Converter may run, so we place a Join node before the PreProcessor to make sense of the merge. We make sure that the directory contains only files that we can convert, in this case, .txt, .pdf, and .docx, and then we run the code above.

+

The code, however, fails.

+
ValueError: Multiple non-default file types are not allowed at once.
+

The more we look at the error, the less it makes sense. What are non-default file types? Why are they not allowed at once, and what can we do to fix that?

+

We head for the documentation, where we find a lead.

+

FileTypeClassifier documentation

+

So it seems like the File Classifier can only process the files if they’re all of the same type.

+

After all we’ve been through with the Hybrid Retrieval pipelines, this sounds wrong. We know that Pipeline can run two branches at the same time. We’ve been doing it all the time just a moment ago. Why can’t FileTypeClassifier send data to two converters just like LanguageClassifier sends data to two retrievers?

+

Turns out, this is not the same thing.

+

Let’s compare the three pipelines and try to spot the difference.

+

All branching pipelines, side by side

+

In the first case, Query sends the same identical value to both Retrievers. So, from the component’s perspective, there’s a single output being produced: the Pipeline takes care of copying it for all nodes connected to it.

+

In the second case, QueryClassifier can send the query to either Retriever but never to both. So, the component can produce two different outputs, but at every run, it will always return just one.

+

In the third case, FileTypeClassifier may need to produce two different outputs simultaneously: for example, one with a list of text files and one with a list of PDFs. And it turns out this can’t be done. This is, unfortunately, a well-known limitation of the Pipeline/BaseComponent API design. +The output of a component is defined as a tuple, (output_values, output_edge), and nodes can’t produce a list of these tuples to send different values to different nodes.

+
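
To make this limitation tangible, here is a heavily simplified, hypothetical sketch of a FileTypeClassifier-like node (not the real implementation; the class name and extension mapping are made up, though the output edge names mirror the indexing pipeline above). Whatever logic it runs, it must return a single (output_values, output_edge) tuple, so all the files can only be routed towards one Converter per run.

+
from haystack.nodes import BaseComponent
+
+class SketchFileTypeClassifier(BaseComponent):
+    outgoing_edges = 10
+
+    def run(self, file_paths):
+        # assume all files share the extension of the first one
+        extension = str(file_paths[0]).split(".")[-1]
+        edge = {"txt": "output_1", "pdf": "output_2", "docx": "output_4"}[extension]
+        # a single (output_values, output_edge) tuple: there is no way to send
+        # the .txt files to output_1 and the .pdf files to output_2 in the same run
+        return {"file_paths": file_paths}, edge
+
+    def run_batch(self, *args, **kwargs):
+        pass
+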

That’s the end of the story. This time, there is no workaround. You must pass the files individually or forget about using a Pipeline for this task.
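
If you want to keep using the pipeline above, the best you can do is call it several times, for example once per file, so that the FileTypeClassifier never sees more than one file type in the same run:

+
# run the indexing pipeline once per file: each run contains a single file type
+for path in paths:
+    pipeline.run(file_paths=[path])
+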

+

+ Validation

+

On top of these challenges, other tradeoffs had to be made for the API to look so simple at first glance. One of these is connection validation.

+

Let’s imagine we quickly skimmed through a tutorial and got one bit of information wrong: we mistakenly believe that in an Extractive QA Pipeline, you need to place a Reader in front of a Retriever. So we sit down and write this.

+
p = Pipeline()
+p.add_node(component=reader, name="Reader", inputs=["Query"])
+p.add_node(component=retriever, name="Retriever", inputs=["Reader"])
+

Up to this point, running the script raises no error. Haystack is happy to connect these two components in this order. You can even draw() this Pipeline just fine.

+

Swapped Retriever/Reader Pipeline

+

Alright, so what happens when we run it?

+
res = p.run(query="What did Einstein work on?")
+
BaseReader.run() missing 1 required positional argument: 'documents'
+

This is the same error we’ve seen in the translating hybrid retrieval pipeline earlier, but fear not! Here, we can follow the suggestion of the error message by doing:

+
res = p.run(query="What did Einstein work on?", documents=document_store.get_all_documents())
+

And to our surprise, this Pipeline doesn’t crash. It just hangs there, showing an insanely slow progress bar, telling us that some inference is in progress. A few hours later, we kill the process and consider switching to another framework because this one is clearly very slow.

+

What happened?

+

The cause of this issue is the same that makes connecting Haystack components in a Pipeline so effortless, and it’s related to the way components and Pipeline communicate. If you check Pipeline.run()’s signature, you’ll see that it looks like this:

+
def run(
+    self,
+    query: Optional[str] = None,
+    file_paths: Optional[List[str]] = None,
+    labels: Optional[MultiLabel] = None,
+    documents: Optional[List[Document]] = None,
+    meta: Optional[Union[dict, List[dict]]] = None,
+    params: Optional[dict] = None,
+    debug: Optional[bool] = None,
+):
+

which mirrors the signature of BaseComponent.run(), the base class all nodes have to inherit from.

+
@abstractmethod
+def run(
+    self,
+    query: Optional[str] = None,
+    file_paths: Optional[List[str]] = None,
+    labels: Optional[MultiLabel] = None,
+    documents: Optional[List[Document]] = None,
+    meta: Optional[dict] = None,
+) -> Tuple[Dict, str]:
+

This match means a few things:

+
  • Every component can be connected to every other because their inputs are identical.
  • Every component can only output the same variables received as input.
  • It’s impossible to tell if it makes sense to connect two components because their inputs and outputs always match.
+

Take this with a grain of salt: the actual implementation is far more nuanced than what I just showed you, but the problem is fundamentally this: components try to be as compatible as possible with all the others, and they have no way to signal, either to the Pipeline or to the users, that they’re meant to be connected only to some nodes and not to others.

+

In addition to this problem, to respect the shared signature, components often take inputs that they don’t use. A Ranker only needs documents, so all the other inputs required by the run() method’s signature go unused. What do components do with those values? It depends (see the sketch after this list):

+
  • Some have them in the signature and forward them unchanged.
  • Some have them in the signature and don’t forward them.
  • Some don’t have them in the signature, breaking the inheritance pattern, and Pipeline reacts by assuming that they should be added unchanged to the output dictionary.
+
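
As a concrete, hypothetical illustration of the first behavior in the list above (this is not a real Haystack node), here is a Ranker-like component that only uses documents, keeps the other inputs in its signature, and forwards them untouched:

+
from haystack.nodes import BaseComponent
+
+class SketchRanker(BaseComponent):
+    outgoing_edges = 1
+
+    def run(self, query=None, file_paths=None, labels=None, documents=None, meta=None):
+        # only `documents` is actually used: sort by the Documents' score, purely for illustration
+        ranked = sorted(documents, key=lambda doc: doc.score or 0, reverse=True)
+        # all the unused inputs are forwarded unchanged, so that downstream
+        # nodes still receive them
+        return {"documents": ranked, "query": query, "file_paths": file_paths,
+                "labels": labels, "meta": meta}, "output_1"
+
+    def run_batch(self, *args, **kwargs):
+        pass
+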

If you look closely at the two workaround nodes for the Hybrid Retrieval pipeline we tried to build before, you’ll notice that the fix focuses entirely on altering the routing of the unused parameters query and documents to make the Pipeline behave the way the user expects. However, this behavior does not generalize: a different pipeline would require different routing, which is why the components behave differently in the first place.

+

+ Wrapping up

+

I could go on for ages talking about the shortcomings of complex Pipelines, but I’d rather stop here.

+

Along this journey into the guts of Haystack Pipelines, we’ve seen at the same time some beautiful APIs and the ugly consequences of their implementation. As always, there’s no free lunch: trying to over-simplify the interface will bite back as soon as the use cases become nontrivial.

+

However, we believe that this concept has a huge potential and that this version of Pipeline can be improved a lot before the impact on the API becomes too heavy. In Haystack 2.0, armed with the experience we gained working with this implementation of Pipeline, we reimplemented it in a fundamentally different way, which will prevent many of these issues.

+

In the next post, we’re going to see how.

+
+

Next: Canals: a new concept of Pipeline

+

Previous: Why rewriting Haystack?!

+

See the entire series here: Haystack 2.0 series

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-10-15-haystack-series-pipeline/indexing-pipeline.png b/posts/2023-10-15-haystack-series-pipeline/indexing-pipeline.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/indexing-pipeline.png rename to posts/2023-10-15-haystack-series-pipeline/indexing-pipeline.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/miriam-bug-report.png b/posts/2023-10-15-haystack-series-pipeline/miriam-bug-report.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/miriam-bug-report.png rename to posts/2023-10-15-haystack-series-pipeline/miriam-bug-report.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-translate-and-distribute.png b/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-translate-and-distribute.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-translate-and-distribute.png rename to posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-translate-and-distribute.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-two-translators.png b/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-two-translators.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-two-translators.png rename to posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-two-translators.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-with-noop.png b/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-with-noop.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-with-noop.png rename to posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-with-noop.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-workaround.png b/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-workaround.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-workaround.png rename to posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval-workaround.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval.png b/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval.png rename to posts/2023-10-15-haystack-series-pipeline/multilingual-hybrid-retrieval.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/query-classifier-pipeline.png b/posts/2023-10-15-haystack-series-pipeline/query-classifier-pipeline.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/query-classifier-pipeline.png rename to posts/2023-10-15-haystack-series-pipeline/query-classifier-pipeline.png diff --git a/static/posts/2023-10-15-haystack-series-pipeline/ranker-docs.png b/posts/2023-10-15-haystack-series-pipeline/ranker-docs.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/ranker-docs.png rename to posts/2023-10-15-haystack-series-pipeline/ranker-docs.png diff --git 
a/static/posts/2023-10-15-haystack-series-pipeline/swapped-retriever-reader.png b/posts/2023-10-15-haystack-series-pipeline/swapped-retriever-reader.png similarity index 100% rename from static/posts/2023-10-15-haystack-series-pipeline/swapped-retriever-reader.png rename to posts/2023-10-15-haystack-series-pipeline/swapped-retriever-reader.png diff --git a/static/posts/2023-10-26-haystack-series-canals/cover.png b/posts/2023-10-26-haystack-series-canals/cover.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/cover.png rename to posts/2023-10-26-haystack-series-canals/cover.png diff --git a/static/posts/2023-10-26-haystack-series-canals/extractiveqa-pipeline.png b/posts/2023-10-26-haystack-series-canals/extractiveqa-pipeline.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/extractiveqa-pipeline.png rename to posts/2023-10-26-haystack-series-canals/extractiveqa-pipeline.png diff --git a/static/posts/2023-10-26-haystack-series-canals/gen-vs-ext-qa-pipeline.png b/posts/2023-10-26-haystack-series-canals/gen-vs-ext-qa-pipeline.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/gen-vs-ext-qa-pipeline.png rename to posts/2023-10-26-haystack-series-canals/gen-vs-ext-qa-pipeline.png diff --git a/static/posts/2023-10-26-haystack-series-canals/highway.png b/posts/2023-10-26-haystack-series-canals/highway.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/highway.png rename to posts/2023-10-26-haystack-series-canals/highway.png diff --git a/posts/2023-10-26-haystack-series-canals/index.html b/posts/2023-10-26-haystack-series-canals/index.html new file mode 100644 index 00000000..567bb516 --- /dev/null +++ b/posts/2023-10-26-haystack-series-canals/index.html @@ -0,0 +1,654 @@ + + + + + + Canals: a new concept of Pipeline · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ Canals: a new concept of Pipeline

+
+ +
+ +
+ + Featured image + +

As we have seen in the previous episode of this series, Haystack’s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite.

+

What does this mean in practice? Let’s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1.x counterparts, and the pros and cons of this new paradigm.

+

+ New Use Cases

+

I’ve already written at length about what made the original Pipeline concept so powerful and about its weaknesses. Pipelines were very effective for the use cases we could conceive of while developing them, but they didn’t generalize well to unforeseen situations.

+

For a long time Haystack could afford not to focus on use cases that didn’t fit its architecture, as I have mentioned in my previous post about the reasons for the rewrite. The pipeline was then more than sufficient for its purposes.

+

However, the situation flipped as LLMs and Generative AI entered the scene abruptly at the end of 2022. Pipeline seemingly overfit its existing use cases, fossilized on them, and could not cope with the requirements set by the new landscape of the field.

+

Let’s take one of these use cases and see where it leads us.

+

+ RAG Pipelines

+

Let’s take one typical example: Retrieval Augmented Generation, or RAG for short. This technique has been used since the very early days of the Generative AI boom as an easy way to strongly reduce hallucinations and improve the alignment of LLMs. The basic idea is: instead of asking the model a question directly, such as "What's the capital of France?", we send it a more complex prompt that includes both the question and the context needed to answer it. Such a prompt might be:

+
Given the following paragraph, answer the question.
+
+Paragraph: France is a unitary semi-presidential republic with its capital in Paris, 
+the country's largest city and main cultural and commercial centre; other major urban 
+areas include Marseille, Lyon, Toulouse, Lille, Bordeaux, Strasbourg and Nice.
+
+Question: What's the capital of France?
+
+Answer:
+

In this situation, the task of the LLM becomes far easier: instead of drawing facts from its internal knowledge, which might be lacking, inaccurate, or out-of-date, the model only needs to rephrase the paragraph’s content to answer the question, improving the model’s performance significantly.

+

We now have a new problem, though. How can we provide the correct snippets of text to the LLM? This is where the “retrieval” keyword comes up.

+

One of Haystack’s primary use cases has been Extractive Question Answering: a system where a Retriever component searches a Document Store (such as a vector or SQL database) for the snippets of text most relevant to a given question. It then sends these snippets to a Reader, which highlights the exact span of text that answers the original question.

+

By replacing a Reader model with an LLM, we get a Retrieval Augmented Generation Pipeline. Easy!

+

Generative vs Extractive QA Pipeline Graph

+

So far, everything checks out. Supporting RAG with Haystack feels not only possible but natural. Let’s take this simple example one step forward: what if, instead of getting the data from a document store, I want to retrieve data from the Internet?

+

+ Web RAG

+

At first glance, the task may not seem daunting. We surely need a special Retriever that, instead of searching through a DB, searches through the Internet using a search engine. But the core concepts stay the same, and so, we assume, should the pipeline’s graph. The end result should be something like this:

+

Initial Web RAG Pipeline Graph

+

However, the problem doesn’t end there. Search engines return links, which need to be accessed, and the content of the webpage downloaded. Such pages may be extensive and contain artifacts, so the resulting text needs to be cleaned, reduced into paragraphs, potentially embedded by a retrieval model, ranked against the original query, and only the top few resulting pieces of text need to be passed over to the LLM. Just by including these minimal requirements, our pipeline already looks like this:

+

Linear Web RAG Pipeline Graph

+

And we still need to consider that URLs may reference not HTML pages but PDFs, videos, zip files, and so on. We need file converters, zip extractors, audio transcribers, and so on.

+

Multiple File Type Web RAG Pipeline Graph

+

You may notice how this use case quickly moved from looking like a simple query pipeline into a strange overlap of a query and an indexing pipeline. As we’ve learned in the previous post, indexing pipelines have their own set of quirks, one of which is that they can’t process files of different types simultaneously. But we can only expect the search engine to return exclusively HTML pages or PDFs if we deliberately filter the results, which makes the pipeline less effective. In fact, a pipeline that can read content from different file types, such as the one above, can’t really be made to work.

+

And what if, on top of this, we need to cache the resulting documents to reduce latency? What if I wanted to get the results from Google’s page 2, but only if the content of page 1 did not answer our question? At this point, the pipeline is hard to imagine, let alone draw.

+

Although Web RAG is somewhat possible in Haystack, it stretches far beyond what the pipeline was designed to handle. Can we do better?

+

+ Pinpointing the issue

+

When we went back to the drawing board to address these concerns, the first step was pinpointing the issue.

+

The root problem, we realized, is that Haystack Pipelines treat each component the way a locomotive treats its wagons. They all look the same from the pipeline’s perspective, they can all be connected in any order, and they all go from A to B rolling over the same pair of rails, all passing through the same stations.

+

Cargo Train

+

In Haystack 1, components are designed to serve the pipeline’s needs first. A good component is identical to all the others, provides the exact interface the pipeline requires, and can be connected to any other in any order. The components are awkward to use outside of a pipeline due to the same run() method that makes the pipeline so ergonomic. Why does the Ranker, which needs only a query and a list of Documents to operate, also accept file_paths and meta in its run() method? It does so uniquely to satisfy the pipeline’s requirements, which in turn only exist to make all components forcefully compatible with each other.

+

Just like a locomotive, the pipeline pushes the components over the input data one by one. When seen in this light, it’s painfully obvious why the indexing pipeline we’ve seen earlier can’t work: the “pipeline train” can only go on one branch at a time. Component trains can’t split mid-execution. They are designed to all see the same data all the time. Even when branching happens, all branches always see the same data. Sending different wagons onto different rails is not possible by design.

+

+ Breaking it down

+

The issue’s core is more evident when seen in this light. The pipeline is the only object that drives the execution, while components tend to be as passive and uniform as possible. This approach doesn’t scale: components are fundamentally different, and asking them to all appear equal forces them to hide their differences, making bugs and odd behavior more likely. As the number of components to handle grows, their variety will increase regardless, so the pipeline must always be aware of all the possibilities to manage them and progressively add edge cases that rapidly increase its complexity.

+

Therefore, the pipeline rewrite for Haystack 2.0 focused on one core principle: the components will define and drive the execution process. There is no locomotive anymore: every component needs to find its own way, grabbing the data it needs from its producers and sending its results to whoever needs them by declaring the proper connections. In the railway metaphor, it’s like adding a steering wheel to each container: the result is a truck, and the overall system now looks like a highway.

+

Highway

+

Just as railways are excellent at going from A to B when you only need to take a few well-known routes and never another, highways are unbeatable at reaching every possible destination with the same effort, even though they need a driver for each wagon. A “highway” Pipeline requires more work from the Components’ side, but it frees them to go wherever they need to with a precision that a “railway” pipeline cannot accomplish.

+

+ Canals

+

The code of this new, more powerful Pipeline object found its way into its dedicated library, Canals. By design, Canals is not geared toward specific NLP use cases, but it’s a minimal, generic ETL-like Pipeline library written purely in Python.

+

Canals brings two core elements to the table:

+
  • The Component protocol, a well-defined API that Python classes need to respect to be understood by the pipeline.
  • The Pipeline object, the graph resolver and execution engine that also performs validation and provides a few utilities on top.
+

Let’s explore these two concepts one by one.

+
+
All these code snippets were tested against the main branch of Canals. Version 0.10.0 should contain all the features highlighted in this post, and you will soon be able to install it with pip install canals==0.10.0.
+
+ +

+ The Pipeline API

+

The Pipeline object may remind vaguely of Haystack’s original pipeline, and using one should feel familiar. For example, this is how you assemble a simple Canals Pipeline that performs a few additions.

+
from canals import Pipeline
+from sample_components import AddFixedValue
+
+# Create the Pipeline object
+pipeline = Pipeline()
+
+# Add the components - note the missing `inputs` parameter
+pipeline.add_component("add_one", AddFixedValue(add=1))
+pipeline.add_component("add_two", AddFixedValue(add=2))
+
+# Connect them together
+pipeline.connect("add_one.result", "add_two.value")
+
+# Draw the pipeline
+pipeline.draw("two_additions_pipeline.png")
+
+# Run the pipeline
+results = pipeline.run({"add_one": {"value": 1}})
+
+print(results)
+# prints '{"add_two": {"result": 4}}'
+

Creating the pipeline requires no special attention; however, you can now pass a max_loops_allowed parameter to limit looping when it’s a risk. By contrast, old Haystack Pipelines did not support loops at all.
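
For example, this minimal sketch caps the loops (max_loops_allowed is the same value you will see again in the serialized pipeline later in this post):

+
from canals import Pipeline
+
+# stop the execution if data goes around a loop more than ten times
+pipeline = Pipeline(max_loops_allowed=10)
+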

+

Next, components are added by calling the Pipeline.add_component(name, component) method. This is subject to limitations very similar to those the previous pipeline.add_node had:

  • Every component needs a unique name.
  • Some names are reserved (for now, only _debug).
  • Instances are not reusable.
  • The object needs to be a component.

However, we no longer connect the components to each other using this function because, although it would be possible to implement in principle, it feels more awkward to use in the case of loops.

+

Consequently, we introduced a new method, Pipeline.connect(). This method follows the syntax ("producer_component.output_name", "consumer_component.input_name"): so we don’t simply line up two components one after the other, but we connect one of their outputs to one of their inputs in an explicit manner.

+

This change allows Canals to perform a much more careful validation of such connections. As we will discover soon, Canals components must declare the type of their inputs and outputs. In this way, Canals not only can make sure that the inputs and outputs exist for the given component, but it can also check whether their types match and can explain connection failures in great detail. For example, if there were a type mismatch, Pipeline.connect() will return an error such as:

+
Cannot connect 'greeter.greeting' with 'add_two.value': their declared input and output 
+types do not match.
+
+greeter:
+- greeting: str
+add_two:
+- value: int (available)
+- add: Optional[int] (available)
+

Once the components are connected together, the resulting pipeline can be drawn. Canals pipeline drawings show far more details than their predecessors because the components are forced to share much more information about what they need to run, the types of these variables, and so on. The pipeline above draws the following image:

+

A Pipeline making two additions

+

You can see how the components’ classes, their inputs and outputs, and all the connections are named and typed.

+

So, how do you run such a pipeline? By just providing a dictionary of input values. Each starting component should have a small dictionary with all the necessary inputs. In the example above, we pass 1 to the value input of add_one. The results mirror the input’s structure: add_two is at the end of the pipeline, so the pipeline will return a dictionary where under the add_two key there is a dictionary: {"result": 4}.

+

By looking at the diagram, you may have noticed that these two components have optional inputs. They’re not necessary for the pipeline to run, but they can be used to dynamically control the behavior of these components. In this case, add controls the “fixed value” this component adds to its primary input. For example:

+
pipeline.run({"add_one": {"value": 1, "add": 2}})
+# returns '{"add_two": {"result": 5}}'
+
pipeline.run({"add_one": {"value": 1}, "add_two": {"add": 10}})
+# returns '{"add_two": {"result": 12}}'
+

One evident difficulty of this API is that it might be challenging to understand what to provide to the run method for each component. This issue has also been considered: the pipeline offers a Pipeline.inputs() method that returns a structured representation of all the expected input. For our pipeline, it looks like:

+
{
+    "add_one": {
+        "value": {
+            "type": int, 
+            "is_optional": False
+        }, 
+        "add": {
+            "type": typing.Optional[int], 
+            "is_optional": True
+        }
+    }, 
+    "add_two": {
+        "add": {
+            "type": typing.Optional[int], 
+            "is_optional": True
+        }
+    }
+}
+

+ The Component API

+

Now that we’ve covered the Pipeline’s API, let’s have a look at what it takes for a Python class to be treated as a Canals Component.

+

You are going to need:

+
  • A @component decorator. All component classes must be decorated with the @component decorator. This allows Canals to discover and validate them.
  • A run() method. This is the method where the main functionality of the component should be carried out. It’s invoked by Pipeline.run() and has a few constraints, which we will describe later.
  • Optionally, a warm_up() method. It can be used to defer the loading of a heavy resource (think a local LLM or an embedding model) to the warm-up stage that occurs right before the first execution of the pipeline. Components that use warm_up() can be added to a Pipeline and connected before the heavy operations are carried out. In this way, the validation that Canals performs at that stage can happen before resources are wasted (see the sketch after this list).
+
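
To make the warm_up() idea concrete, here is a hypothetical sketch of a generator-like component (the component and its “model” are made up for this example) that defers its heavy setup:

+
from canals import component
+
+@component
+class SketchGenerator:
+
+    def __init__(self, model_name: str = "some-local-model"):
+        self.model_name = model_name
+        self.model = None                  # nothing heavy happens at init time
+
+    def warm_up(self):
+        # called once, right before the first execution of the pipeline,
+        # after the connections have already been validated
+        self.model = lambda prompt: f"[{self.model_name}] {prompt}"  # stand-in for loading a real model
+
+    @component.output_types(reply=str)
+    def run(self, prompt: str):
+        return {"reply": self.model(prompt)}
+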

To summarize, a minimal Canals component can look like this:

+
from canals import component
+
+@component
+class Double:
+
+    @component.output_types(result=int)
+    def run(self, value: int):
+        return {"result": value * 2}
+

Note how the run() method has a few peculiar features. One is that all the method parameters need to be typed: if value was not declared as value: int, Canals would raise an exception demanding type hints.

+

This is the way components declare to the pipeline which inputs they expect and of which type: this is the first half of the information needed to perform the validation that Pipeline.connect() carries out.

+

The other half of the information comes from the @component.output_types decorator. Canals demands that components declare how many outputs they will produce and of which types. One may ask why we don’t rely on typing for the outputs, just as we’ve done for the inputs, and simply declare components as:

+
@component
+class Double:
+
+    def run(self, value: int) -> int:
+        return value * 2
+

For Double, this is a legitimate solution. However, let’s take another example: a component called CheckParity that, if its input value is even, sends it unchanged over the even output, while if it’s odd, sends it over the odd output. The following clearly doesn’t work: we’re not communicating anywhere to Canals which output is even and which one is odd.

+
@component
+class CheckParity:
+
+    def run(self, value: int) -> int:
+        if value % 2 == 0:
+            return value
+        return value
+

How about this instead?

+
@component
+class CheckParity:
+
+    def run(self, value: int) -> Dict[str, int]:
+        if value % 2 == 0:
+            return {"even": value}
+        return {"odd": value}
+

This approach carries all the information required. However, such information is only available after the run() method is called. Unless we parse the method to discover all return statements and their keys (which is not always possible), Canals cannot know all the keys the return dictionary may have. So, it can’t validate the connections when Pipeline.connect() is called.

+

The decorator bridges the gap by allowing the class to declare in advance what outputs it will produce and of which type. Pipeline trusts this information to be correct and validates the connections accordingly.
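
For example, a working CheckParity could be sketched as follows (using the same decorator we saw for Double; this is an illustration, not a component that ships with Canals):

+
@component
+class CheckParity:
+
+    @component.output_types(even=int, odd=int)
+    def run(self, value: int):
+        # the declared outputs let Pipeline.connect() validate connections to
+        # "check_parity.even" and "check_parity.odd" before anything runs
+        if value % 2 == 0:
+            return {"even": value}
+        return {"odd": value}
+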

+

Okay, but what if the component is very dynamic? The output type may depend on the input type. Perhaps the number of inputs depends on some initialization parameter. In these cases, Canals allows components to declare their input and output types in the init method, as follows:

+
@component
+class HighlyDynamicComponent:
+
+    def __init__(self, ...):
+        component.set_input_types(self, input_name=input_type, ...)
+        component.set_output_types(self, output_name=output_type, ...)
+
+    def run(self, **kwargs):
+        ...
+

Note that there’s no more typing on run(), and the decorator is gone. The information provided in the init method is sufficient for Canals to validate the connections.

+

One more feature of the input and output declarations relates to optional and variadic values. Canals supports both through a mix of type checking and signature inspection. For example, let’s have a look at what the AddFixedValue component we’ve seen earlier looks like:

+
from typing import Optional
+from canals import component
+
+
+@component
+class AddFixedValue:
+    """
+    Adds two values together.
+    """
+
+    def __init__(self, add: int = 1):
+        self.add = add
+
+    @component.output_types(result=int)
+    def run(self, value: int, add: Optional[int] = None):
+        """
+        Adds two values together.
+        """
+        if add is None:
+            add = self.add
+        return {"result": value + add}
+

You can see that add, the optional parameter we met before, has a default value. Adding a default value to a parameter in the run() signature tells Canals that the parameter itself is optional, so the component can run even if that specific input doesn’t receive any value from the pipeline’s input or other components.

+

Another component that generalizes the sum operation is Sum, which instead looks like this:

+
from canals import component
+from canals.component.types import Variadic
+
+@component
+class Sum:
+    """
+    Adds all its inputs together.
+    """
+
+    @component.output_types(total=int)
+    def run(self, values: Variadic[int]):
+        """
+        :param values: the values to sum
+        """
+        return {"total": sum(v for v in values if v is not None)}
+

In this case, we used the special Canals type Variadic to tell Canals that the values input can receive data from multiple producers, instead of just one. Therefore, values is going to be a list type, but it can be connected to single int outputs, making it a valuable aggregator.
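
As a small usage sketch (reusing the sample components from above, so the numbers are only illustrative), two producers can feed the same Variadic input:

+
pipeline = Pipeline()
+pipeline.add_component("add_one", AddFixedValue(add=1))
+pipeline.add_component("add_two", AddFixedValue(add=2))
+pipeline.add_component("sum", Sum())
+
+# both "result" outputs are plain ints, yet they can both be connected
+# to the single Variadic "values" input of Sum
+pipeline.connect("add_one.result", "sum.values")
+pipeline.connect("add_two.result", "sum.values")
+
+results = pipeline.run({"add_one": {"value": 1}, "add_two": {"value": 1}})
+# expected: {"sum": {"total": 5}}, since (1+1) + (1+2) = 5
+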

+

+ Serialization

+

Just like old Haystack Pipelines, Canals pipelines can be serialized. However, this feature suffered from problems similar to those plaguing the execution model, so it was changed radically.

+

The original pipeline gathered intrusive information about each of its components when initialized, leveraging the shared BaseComponent class. Conversely, Canals’ Pipeline delegates the serialization process entirely to its components.

+

In Canals, if a component wishes to be serializable, it must provide two additional methods, to_dict and from_dict, which perform serialization and deserialization to and from a dictionary. The pipeline limits itself to calling each of its components’ methods, collecting their outputs, grouping them together with some limited extra information (such as the connections between them), and returning the result.

+

For example, if AddFixedValue were serializable, its serialized version could look like this:

+
{
+    "type": "AddFixedValue",
+    "init_parameters": {
+        "add": 1
+    }
+}
+

The entire pipeline we used above would end up as follows:

+
{
+    "max_loops_allowed": 100,
+    "components": {
+        "add_one": {
+            "type": "AddFixedValue",
+            "init_parameters": {
+                "add": 1
+            }
+        },
+        "add_two": {
+            "type": "AddFixedValue",
+            "init_parameters": {
+                "add": 2
+            }
+        }
+    },
+    "connections": [
+        {
+            "sender": "add_one.result", 
+            "receiver": "add_two.value",
+        }
+    ]
+}
+

Notice how the components are free to perform serialization in the way they see fit. The only requirement imposed by Canals is the presence of two top-level keys, type and init_parameters, which are necessary for the pipeline to deserialize each component into the correct class.
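
A hypothetical sketch of how AddFixedValue could satisfy this contract (the to_dict/from_dict names come from above; treating from_dict as a classmethod and the exact method bodies are assumptions of this sketch):

+
from typing import Optional
+from canals import component
+
+@component
+class AddFixedValue:
+
+    def __init__(self, add: int = 1):
+        self.add = add
+
+    def to_dict(self):
+        # the only hard requirement: "type" and "init_parameters" top-level keys
+        return {"type": "AddFixedValue", "init_parameters": {"add": self.add}}
+
+    @classmethod
+    def from_dict(cls, data):
+        return cls(**data["init_parameters"])
+
+    @component.output_types(result=int)
+    def run(self, value: int, add: Optional[int] = None):
+        return {"result": value + (add if add is not None else self.add)}
+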

+

This is useful, especially if the component’s state includes some non-trivial values, such as objects, API keys, or other special values. Pipeline no longer needs to know how to serialize everything the Components may contain: the task is fully delegated to them, and they always know best what needs to be done.

+

+ But… do we need any of this?

+

Having completed a tour of Canals’ features, you might have noticed one detail. Pipelines are now a bit harder to use than before: you can’t just chain every component after every other. There are connections to be made, validation to perform, graphs to assemble, and so on.

+

In exchange, the pipeline is now more powerful than before. Sure, but so is a plain Python script. Do we really need the Pipeline object? And what do we need it for?

+

ETL frameworks often include an abstraction over the execution flow to make the same high-level system execute over different infrastructures, primarily for scalability and speed. They may leverage the abstraction to transparently distribute nodes on different machines, run them in parallel, increase throughput by adding replicas and other similar operations.

+

For now, Canals doesn’t provide anything of this kind. While we don’t exclude that this abstraction may serve that purpose in the future, there are a few other benefits the pipeline provides us right now:

+
  • Validation. While components normally validate their inputs and outputs, the pipeline does all the validation before the components run, even before loading heavy resources. This makes the whole system far less likely to fail at runtime for a simple input/output mismatch, which can be priceless for complex applications.
  • Serialization. Redistributing code is always tricky: redistributing a JSON file is much safer. Pipelines make it possible to represent complex systems in a readable JSON file that can be edited, shared, stored, deployed, and re-deployed on different backends at need.
  • Drawing. Canals offers a way to see your system clearly and automatically, which is often very handy for debugging, inspecting the system, and collaborating on the pipeline’s design.
  • On top of this, the pipeline abstraction promotes flatter API surfaces by discouraging components nesting one within the other and providing easy-to-use, single-responsibility components that are easy to reason about.
+

Having said all of this, however, we don’t believe that the pipeline design makes Haystack win or lose. Pipelines are just a bonus on top of what provides the real value: a broad set of components that reliably perform well-defined tasks. That’s why the Component API does not make the run() method awkward to use outside of a Pipeline: calling Sum.run(values=[1, 2, 3]) feels Pythonic outside of a pipeline and always will.

+

In the following posts, I will explore the world of Haystack components, starting from our now familiar use cases: RAG Pipelines.

+
+

Next: RAG Pipelines from scratch

+

Previous: Haystack’s Pipeline

+

See the entire series here: Haystack 2.0 series

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-10-26-haystack-series-canals/initial-web-rag-pipeline.png b/posts/2023-10-26-haystack-series-canals/initial-web-rag-pipeline.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/initial-web-rag-pipeline.png rename to posts/2023-10-26-haystack-series-canals/initial-web-rag-pipeline.png diff --git a/static/posts/2023-10-26-haystack-series-canals/linear-web-rag-pipeline.png b/posts/2023-10-26-haystack-series-canals/linear-web-rag-pipeline.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/linear-web-rag-pipeline.png rename to posts/2023-10-26-haystack-series-canals/linear-web-rag-pipeline.png diff --git a/static/posts/2023-10-26-haystack-series-canals/multifile-web-rag-pipeline.png b/posts/2023-10-26-haystack-series-canals/multifile-web-rag-pipeline.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/multifile-web-rag-pipeline.png rename to posts/2023-10-26-haystack-series-canals/multifile-web-rag-pipeline.png diff --git a/static/posts/2023-10-26-haystack-series-canals/rag-pipeline.png b/posts/2023-10-26-haystack-series-canals/rag-pipeline.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/rag-pipeline.png rename to posts/2023-10-26-haystack-series-canals/rag-pipeline.png diff --git a/static/posts/2023-10-26-haystack-series-canals/train.png b/posts/2023-10-26-haystack-series-canals/train.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/train.png rename to posts/2023-10-26-haystack-series-canals/train.png diff --git a/static/posts/2023-10-26-haystack-series-canals/two_additions_pipeline.png b/posts/2023-10-26-haystack-series-canals/two_additions_pipeline.png similarity index 100% rename from static/posts/2023-10-26-haystack-series-canals/two_additions_pipeline.png rename to posts/2023-10-26-haystack-series-canals/two_additions_pipeline.png diff --git a/static/posts/2023-10-27-haystack-series-rag/bm25-rag-pipeline.png b/posts/2023-10-27-haystack-series-rag/bm25-rag-pipeline.png similarity index 100% rename from static/posts/2023-10-27-haystack-series-rag/bm25-rag-pipeline.png rename to posts/2023-10-27-haystack-series-rag/bm25-rag-pipeline.png diff --git a/static/posts/2023-10-27-haystack-series-rag/cover.png b/posts/2023-10-27-haystack-series-rag/cover.png similarity index 100% rename from static/posts/2023-10-27-haystack-series-rag/cover.png rename to posts/2023-10-27-haystack-series-rag/cover.png diff --git a/static/posts/2023-10-27-haystack-series-rag/double-promptbuilder-pipeline.png b/posts/2023-10-27-haystack-series-rag/double-promptbuilder-pipeline.png similarity index 100% rename from static/posts/2023-10-27-haystack-series-rag/double-promptbuilder-pipeline.png rename to posts/2023-10-27-haystack-series-rag/double-promptbuilder-pipeline.png diff --git a/static/posts/2023-10-27-haystack-series-rag/double-variable-promptbuilder-pipeline.png b/posts/2023-10-27-haystack-series-rag/double-variable-promptbuilder-pipeline.png similarity index 100% rename from static/posts/2023-10-27-haystack-series-rag/double-variable-promptbuilder-pipeline.png rename to posts/2023-10-27-haystack-series-rag/double-variable-promptbuilder-pipeline.png diff --git a/static/posts/2023-10-27-haystack-series-rag/elasticsearch-rag-pipeline.png b/posts/2023-10-27-haystack-series-rag/elasticsearch-rag-pipeline.png similarity index 100% 
rename from static/posts/2023-10-27-haystack-series-rag/elasticsearch-rag-pipeline.png rename to posts/2023-10-27-haystack-series-rag/elasticsearch-rag-pipeline.png diff --git a/posts/2023-10-27-haystack-series-rag/index.html b/posts/2023-10-27-haystack-series-rag/index.html new file mode 100644 index 00000000..b840042e --- /dev/null +++ b/posts/2023-10-27-haystack-series-rag/index.html @@ -0,0 +1,669 @@ + + + + + + RAG Pipelines from scratch · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ RAG Pipelines from scratch

+
+ +
+ +
+ + Featured image + +

Last updated: 21/11/2023

+

Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific it is. To stay relevant in today’s NLP landscape, Haystack must enable it.

+

Let’s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. At that point, the knowledge of the LLM will be limited only by the content of our data store, and all of this can be accomplished without fine-tuning language models.

+
+
💡 I recently gave a talk about RAG applications in Haystack 2.0, so if you prefer videos to blog posts, you can find the recording here. Keep in mind that the code might be slightly outdated.
+
+ +

+ What is RAG?

+

The idea of Retrieval Augmented Generation was first defined in a paper by Meta in 2020. It was designed to solve a few of the inherent limitations of seq2seq models (language models that, given a sentence, can finish writing it for you), such as:

+
  • Their internal knowledge, as vast as it may be, will always be limited and at least slightly out of date.
  • They work best on generic topics rather than niche and specific areas unless they’re fine-tuned on purpose, which is a costly and slow process.
  • All models, even those with subject-matter expertise, tend to “hallucinate”: they confidently produce false statements backed by apparently solid reasoning.
  • They cannot reliably cite their sources or tell where their knowledge comes from, which makes fact-checking their replies nontrivial.
+

RAG solves these issues by “grounding” the LLM to reality: it provides some relevant, up-to-date, and trusted information to the model together with the question. In this way, the LLM doesn’t need to draw information from its internal knowledge, but it can base its replies on the snippets provided by the user.

+

RAG Paper diagram

+

As you can see in the image above (taken directly from the original paper), a system such as RAG is made of two parts: one that finds text snippets that are relevant to the question asked by the user and a generative model, usually an LLM, that rephrases the snippets into a coherent answer for the question.

+

Let’s build one of these with Haystack 2.0!

+
+
💡 Do you want to see this code in action? Check out the Colab notebook here or the gist here.
+
+ +
+
⚠️ Warning: This code was tested on haystack-ai==0.149.0. Haystack 2.0 is still unstable, so later versions might introduce breaking changes without notice until Haystack 2.0 is officially released. The concepts and components however stay the same.
+
+ +

+ Generators: Haystack’s LLM components

+

Like every NLP framework that deserves its name, Haystack supports LLMs in different ways. The easiest way to query an LLM in Haystack 2.0 is through a Generator component: depending on which LLM you use and how you intend to query it (chat, text completion, etc.), you should pick the appropriate class.

+

We’re going to use gpt-3.5-turbo (the model behind ChatGPT) for these examples, so the component we need is GPTGenerator. Here is all the code required to use it to query OpenAI’s gpt-3.5-turbo:

+
from haystack.preview.components.generators import GPTGenerator
+
+generator = GPTGenerator(api_key=api_key)
+generator.run(prompt="What's the official language of France?")
+# returns {"replies": ['The official language of France is French.']}
+

You can select your favorite OpenAI model by specifying a model_name at initialization, for example, gpt-4. It also supports setting an api_base_url for private deployments, a streaming_callback if you want to see the output generated live in the terminal, and optional kwargs to let you pass whatever other parameter the model understands, such as the number of answers (n), the temperature (temperature), etc.

+

Note that in this case, we’re passing the API key to the component’s constructor. This is unnecessary: GPTGenerator can read the value from the OPENAI_API_KEY environment variable and also from the api_key module variable of openai’s SDK.
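
For instance, this minimal sketch relies on the environment variable alone (the placeholder key and the model choice are just examples):

+
import os
+from haystack.preview.components.generators import GPTGenerator
+
+os.environ["OPENAI_API_KEY"] = "sk-..."   # normally set outside the script, never hardcoded
+
+generator = GPTGenerator(model_name="gpt-4")  # no api_key argument: it's read from the environment
+generator.run(prompt="What's the official language of France?")
+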

+

Right now, Haystack supports HuggingFace models through the HuggingFaceLocalGenerator and HuggingFaceTGIGenerator components, and many more LLMs are coming soon.

+

+ PromptBuilder: structured prompts from templates

+

Let’s imagine that our LLM-powered application also comes with some pre-defined questions that the user can select instead of typing in full. For example, instead of asking them to type What's the official language of France?, we let them select Tell me the official languages from a list, and they simply need to type “France” (or “Wakanda” for a change - our chatbot needs some challenges too).

+

In this scenario, we have two pieces of the prompt: a variable (the country name, like “France”) and a prompt template, which in this case is "What's the official language of {{ country }}?"

+

Haystack offers a component that can render variables into prompt templates: it’s called PromptBuilder. Like the generators we’ve seen before, PromptBuilder is nearly trivial to initialize and use.

+
from haystack.preview.components.builders.prompt_builder import PromptBuilder
+
+prompt_builder = PromptBuilder(template="What's the official language of {{ country }}?")
+prompt_builder.run(country="France")
+# returns {'prompt': "What's the official language of France?"}
+

Note how we defined a variable, country, by wrapping its name in double curly brackets. PromptBuilder lets you define any input variable that way: if the prompt template was "What's the official language of {{ nation }}?", the run() method of PromptBuilder would have expected a nation input.

+

This syntax comes from Jinja2, a popular templating library for Python. If you have ever used Flask, Django, or Ansible, you will feel at home with PromptBuilder. If instead you’ve never heard of any of these libraries, you can check out the syntax in Jinja’s documentation. Jinja has a powerful templating language and offers far more features than you’ll ever need in prompt templates, ranging from simple if statements and for loops to object access through dot notation, template nesting, variable manipulation, macros, full-fledged import and encapsulation of templates, and more.
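
As a tiny illustration of what this buys you, here is a hypothetical template using an if statement (the variable names are made up for this example):

+
from haystack.preview.components.builders.prompt_builder import PromptBuilder
+
+# a made-up template with a Jinja conditional
+template = "Answer in {{ language }}.{% if formal %} Use a formal tone.{% endif %} Question: {{ question }}"
+
+builder = PromptBuilder(template=template)
+builder.run(language="French", formal=True, question="What's the capital of France?")
+# returns {'prompt': "Answer in French. Use a formal tone. Question: What's the capital of France?"}
+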

+

+ A Simple Generative Pipeline

+

With these two components, we can assemble a minimal pipeline to see how they work together. Connecting them is trivial: PromptBuilder generates a prompt output, and GPTGenerator expects an input with the same name and type.

+
from haystack.preview import Pipeline
+from haystack.preview.components.generators import GPTGenerator
+from haystack.preview.components.builders.prompt_builder import PromptBuilder
+
+pipe = Pipeline()
+pipe.add_component("prompt_builder", PromptBuilder(template="What's the official language of {{ country }}?"))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("prompt_builder", "llm")
+
+pipe.run({"prompt_builder": {"country": "France"}})
+# returns {"llm": {"replies": ['The official language of France is French.'] }}
+

Here is the pipeline graph:

+

Simple LLM pipeline

+

+ Make the LLM cheat

+

Building the Generative part of a RAG application was very simple! So far, we only provided the question to the LLM, but no information to base its answers on. Nowadays, LLMs possess a lot of general knowledge, so questions about famous countries such as France or Germany are easy for them to reply to correctly. However, when using an app about world countries, some users may be interested in knowing more about obscure or defunct microstates that don’t exist anymore. In this case, ChatGPT is unlikely to provide the correct answer without any help.

+

For example, let’s ask our pipeline something really obscure.

+
pipe.run({"prompt_builder": {"country": "the Republic of Rose Island"}})
+# returns {
+#     "llm": {
+#         "replies": [
+#             'The official language of the Republic of Rose Island was Italian.'
+#         ]
+#     }
+# }
+

The answer is an educated guess but is not accurate: although it was located just outside of Italy’s territorial waters, according to Wikipedia the official language of this short-lived micronation was Esperanto.

+

How can we get ChatGPT to reply to such a question correctly? One way is to make it “cheat” by providing the answer as part of the question. In fact, PromptBuilder is designed to serve precisely this use case.

+

Here is our new, more advanced prompt:

+
Given the following information, answer the question.
+Context: {{ context }}
+Question: {{ question }}
+

Let’s build a new pipeline using this prompt!

+
context_template = """
+Given the following information, answer the question.
+Context: {{ context }}
+Question: {{ question }}
+"""
+language_template = "What's the official language of {{ country }}?"
+
+pipe = Pipeline()
+pipe.add_component("context_prompt", PromptBuilder(template=context_template))
+pipe.add_component("language_prompt", PromptBuilder(template=language_template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("language_prompt", "context_prompt.question")
+pipe.connect("context_prompt", "llm")
+
+pipe.run({
+    "context_prompt": {"context": "Rose Island had its own government, currency, post office, and commercial establishments, and the official language was Esperanto."}
+    "language_prompt": {"country": "the Republic of Rose Island"}
+})
+# returns {
+#     "llm": {
+#         "replies": [
+#             'The official language of the Republic of Rose Island is Esperanto.'
+#         ]
+#     }
+# }
+

Let’s look at the graph of our Pipeline:

+

Double PromptBuilder pipeline

+

The beauty of PromptBuilder lies in its flexibility. It allows users to chain instances together to assemble complex prompts from simpler schemas: for example, we used the output of the first PromptBuilder as the value of question in the second prompt.

+

However, in this specific scenario, we can build a simpler system by merging the two prompts into one.

+
Given the following information, answer the question.
+Context: {{ context }}
+Question: What's the official language of {{ country }}?
+

Using this new prompt, the resulting pipeline becomes again very similar to our first.

+
template = """
+Given the following information, answer the question.
+Context: {{ context }}
+Question: What's the official language of {{ country }}?
+"""
+pipe = Pipeline()
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("prompt_builder", "llm")
+
+pipe.run({
+    "prompt_builder": {
+        "context": "Rose Island had its own government, currency, post office, and commercial establishments, and the official language was Esperanto.",
+        "country": "the Republic of Rose Island"
+    }
+})
+# returns {
+#     "llm": {
+#         "replies": [
+#             'The official language of the Republic of Rose Island is Esperanto.'
+#         ]
+#     }
+# }
+

PromptBuilder with two inputs pipeline

+

+ Retrieving the context

+

For now, we’ve been playing with prompts, but the fundamental question remains unanswered: where do we get the correct text snippet for the question the user is asking? We can’t expect such information as part of the input: we need our system to be able to fetch this information independently, based uniquely on the query.

+

Thankfully, retrieving relevant information from large corpora (a technical term for extensive collections of data, usually text) is a task that Haystack has excelled at since its inception: the components that perform this task are called Retrievers.

+

Retrieval can be performed on different data sources: to begin, let’s assume we’re searching for data in a local database, which is the use case that most Retrievers are geared towards.

+

Let’s create a small local database to store information about some European countries. Haystack offers a neat object for these small-scale demos: InMemoryDocumentStore. This document store is little more than a Python dictionary under the hood but provides the same exact API as much more powerful data stores and vector stores, such as Elasticsearch or ChromaDB. Keep in mind that the object is called “Document Store” and not simply “datastore” because what it stores is Haystack’s Document objects: a small dataclass that helps other components make sense of the data that they receive.

+

So, let’s initialize an InMemoryDocumentStore and write some Documents into it.

+
from haystack.preview.dataclasses import Document
+from haystack.preview.document_stores import InMemoryDocumentStore
+
+documents = [
+    Document(content="German is the the official language of Germany."), 
+    Document(content="The capital of France is Paris, and its official language is French."),
+    Document(content="Italy recognizes a few official languages, but the most widespread one is Italian."),
+    Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea.")
+]
+docstore = InMemoryDocumentStore()
+docstore.write_documents(documents=documents)
+
+docstore.filter_documents()
+# returns [
+#     Document(content="German is the the official language of Germany."), 
+#     Document(content="The capital of France is Paris, and its official language is French."),
+#     Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea."),
+#     Document(content="Italy recognizes a few official languages, but the most widespread one is Italian."),
+# ]
+

Once the document store is set up, we can initialize a retriever. In Haystack 2.0, each document store comes with its own set of highly optimized retrievers: InMemoryDocumentStore offers two, one based on BM25 ranking and one based on embedding similarity.

+

Let’s start with the BM25-based retriever, which is slightly easier to set up. Let’s first use it in isolation to see how it behaves.

+
from haystack.preview.components.retrievers import InMemoryBM25Retriever
+
+retriever = InMemoryBM25Retriever(document_store=docstore)
+retriever.run(query="Rose Island", top_k=1)
+# returns [
+#     Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea.")
+# ]
+
+retriever.run(query="Rose Island", top_k=3)
+# returns [
+#     Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea.")
+#     Document(content="Italy recognizes a few official languages, but the most widespread one is Italian."),
+#     Document(content="The capital of France is Paris, and its official language is French."),
+# ]
+

We see that InMemoryBM25Retriever accepts a few parameters. query is the question we want to find relevant documents for. In the case of BM25, the algorithm only searches for exact word matches. The resulting retriever is very fast, but it doesn’t fail gracefully: it can’t handle spelling mistakes, synonyms, or descriptions of an entity. For example, documents containing the word “cat” would be considered irrelevant against a query such as “felines”.

+

top_k controls the number of documents returned. We can see that in the first example, only one document is returned, the correct one. In the second, where top_k = 3, the retriever is forced to return three documents even if just one is relevant, so it picks the other two randomly. Although the behavior is not optimal, BM25 guarantees that if there is a document that is relevant to the query, it will be in the first position, so for now, we can use it with top_k=1.

+

Retrievers also accept a filters parameter, which lets you pre-filter the documents before retrieval. This is a powerful technique that comes in handy in complex applications, but for now we have no use for it. I will talk about this topic, called metadata filtering, in more detail in a later post.

+

Let’s now make use of this new component in our Pipeline.

+

+ Our first RAG Pipeline

+

The retriever does not return a single string but a list of Documents. How do we put the content of these objects into our prompt template?

+

It’s time to use Jinja’s powerful syntax to do some unpacking on our behalf.

+
Given the following information, answer the question.
+
+Context: 
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+
+Question: What's the official language of {{ country }}?
+

Notice how, despite the slightly alien syntax for a Python programmer, what the template does is reasonably evident: it iterates over the documents and, for each of them, renders its content field.
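+
For example, here is a rough sketch of what PromptBuilder produces when run in isolation with the template above stored in a template string (the exact whitespace of the rendered prompt may differ slightly):

+
from haystack.preview.components.builders.prompt_builder import PromptBuilder
+from haystack.preview.dataclasses import Document
+
+builder = PromptBuilder(template=template)
+builder.run(
+    country="the Republic of Rose Island",
+    documents=[Document(content="The official language of Rose Island was Esperanto.")]
+)
+# returns (roughly) {
+#     "prompt": "Given the following information, answer the question.\n\nContext: \n    The official language of Rose Island was Esperanto.\n\nQuestion: What's the official language of the Republic of Rose Island?"
+# }
+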

+

With all these pieces set up, we can finally put them all together.

+
template = """
+Given the following information, answer the question.
+
+Context: 
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+
+Question: What's the official language of {{ country }}?
+"""
+pipe = Pipeline()
+
+pipe.add_component("retriever", InMemoryBM25Retriever(document_store=docstore))
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("retriever", "prompt_builder.documents")
+pipe.connect("prompt_builder", "llm")
+
+country = "the Republic of Rose Island"
+pipe.run({
+    "retriever": {"query": country},
+    "prompt_builder": {"country": country}
+})
+# returns {
+#     "llm": {
+#         "replies": [
+#             'The official language of the Republic of Rose Island is Esperanto.'
+#         ]
+#     }
+# }
+

BM25 RAG Pipeline

+

Congratulations! We’ve just built our first, true-to-its-name RAG Pipeline.

+

+ Scaling up: Elasticsearch

+

So, we now have our running prototype. What does it take to scale this system up for production workloads?

+

Of course, scaling up a system to production readiness is no simple task that can be addressed in a paragraph. Still, we can start this journey with one component that can readily be improved: the document store.

+

InMemoryDocumentStore is clearly a toy implementation: Haystack supports much more performant document stores such as Elasticsearch, ChromaDB and Marqo. Since we have built our app with a BM25 retriever, let’s select Elasticsearch as our production-ready document store of choice.

+

How do we use Elasticsearch in our pipeline? All it takes is swapping out InMemoryDocumentStore and InMemoryBM25Retriever for their Elasticsearch counterparts, which offer nearly identical APIs.

+

First, let’s create the document store: we will need a slightly more complex setup to connect to the Elasticsearch backend. In this example, we use Elasticsearch version 8.8.0, but any Elasticsearch 8 version should work.

+
import os
+from elasticsearch_haystack.document_store import ElasticsearchDocumentStore
+
+host = os.environ.get("ELASTICSEARCH_HOST", "https://localhost:9200")
+user = "elastic"
+pwd = os.environ["ELASTICSEARCH_PASSWORD"]  # You need to provide this value
+
+docstore = ElasticsearchDocumentStore(
+    hosts=[host], 
+    basic_auth=(user, pwd), 
+    ca_certs="/content/elasticsearch-8.8.0/config/certs/http_ca.crt"
+)
+

Now, let’s write our four documents into the store again. In this case, we specify the duplicate policy, so if the documents are already present, they will be overwritten. All Haystack document stores offer three policies to handle duplicates: FAIL (the default), SKIP, and OVERWRITE.

+
from haystack.preview.document_stores import DuplicatePolicy
+documents = [
+    Document(content="German is the official language of Germany."), 
+    Document(content="The capital of France is Paris, and its official language is French."),
+    Document(content="Italy recognizes a few official languages, but the most widespread one is Italian."),
+    Document(content="Esperanto has been adopted as official language for some microstates as well, such as the Republic of Rose Island, a short-lived microstate built on a sea platform in the Adriatic Sea.")
+]
+docstore.write_documents(documents=documents, policy=DuplicatePolicy.OVERWRITE)
+

Once this is done, we are ready to build the same pipeline as before, but using ElasticsearchBM25Retriever.

+
from elasticsearch_haystack.bm25_retriever import ElasticsearchBM25Retriever
+
+template = """
+Given the following information, answer the question.
+
+Context: 
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+
+Question: What's the official language of {{ country }}?
+"""
+
+pipe = Pipeline()
+pipe.add_component("retriever", ElasticsearchBM25Retriever(document_store=docstore))
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("retriever", "prompt_builder.documents")
+pipe.connect("prompt_builder", "llm")
+
+pipe.draw("elasticsearch-rag-pipeline.png")
+
+country = "the Republic of Rose Island"
+pipe.run({
+    "retriever": {"query": country},
+    "prompt_builder": {"country": country}
+})
+# returns {
+#     "llm": {
+#         "replies": [
+#             'The official language of the Republic of Rose Island is Esperanto.'
+#         ]
+#     }
+# }
+

Elasticsearch RAG Pipeline

+

That’s it! We’re now running the same pipeline over a production-ready Elasticsearch instance.

+

+ Wrapping up

+

In this post, we’ve detailed some fundamental components that make RAG applications possible with Haystack: Generators, the PromptBuilder, and Retrievers. We’ve seen how they can all be used in isolation and how you can make Pipelines out of them to achieve the same goal. Last, we’ve experimented with some of the (very early!) features that make Haystack 2.0 production-ready and easy to scale up from a simple demo with minimal changes.

+

However, this is just the start of our journey into RAG. Stay tuned!

+
+

Next: Indexing data for RAG applications

+

Previous: Canals: a new concept of Pipeline

+

See the entire series here: Haystack 2.0 series

+

Cover image from Wikipedia

+ +
+ + +
+ + +
+ + + + + +
+ + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-10-27-haystack-series-rag/rag-paper-image.png b/posts/2023-10-27-haystack-series-rag/rag-paper-image.png similarity index 100% rename from static/posts/2023-10-27-haystack-series-rag/rag-paper-image.png rename to posts/2023-10-27-haystack-series-rag/rag-paper-image.png diff --git a/static/posts/2023-10-27-haystack-series-rag/simple-llm-pipeline.png b/posts/2023-10-27-haystack-series-rag/simple-llm-pipeline.png similarity index 100% rename from static/posts/2023-10-27-haystack-series-rag/simple-llm-pipeline.png rename to posts/2023-10-27-haystack-series-rag/simple-llm-pipeline.png diff --git a/static/posts/2023-11-05-haystack-series-minimal-indexing/cover.png b/posts/2023-11-05-haystack-series-minimal-indexing/cover.png similarity index 100% rename from static/posts/2023-11-05-haystack-series-minimal-indexing/cover.png rename to posts/2023-11-05-haystack-series-minimal-indexing/cover.png diff --git a/posts/2023-11-05-haystack-series-minimal-indexing/index.html b/posts/2023-11-05-haystack-series-minimal-indexing/index.html new file mode 100644 index 00000000..de343278 --- /dev/null +++ b/posts/2023-11-05-haystack-series-minimal-indexing/index.html @@ -0,0 +1,510 @@ + + + + + + Indexing data for RAG applications · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ Indexing data for RAG applications

+
+ +
+ +
+ + Featured image + +

In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn’t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem?

+

In this post, I will show you how to use Haystack 2.0 to create a large number of documents from a few web pages and write them to a document store that you can then use for retrieval.

+
+
💡 Do you want to see the code in action? Check out the Colab notebook or the gist.
+
+ +
+
⚠️ Warning: This code was tested on haystack-ai==0.117.0. Haystack 2.0 is still unstable, so later versions might introduce breaking changes without notice until Haystack 2.0 is officially released. The concepts and components, however, stay the same.
+
+ +

+ The task

+

In Haystack’s terminology, the process of extracting information from a group of files and storing the data in a document store is called “indexing”. The process includes, at the very minimum, reading the content of a file, generating a Document object containing all its text, and then storing it in a document store.
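+
In its most minimal form, this is a process you can sketch in just a few lines. Note that this is only a sketch: I'm assuming that TextFileToDocument (one of the converters listed below) lives under this module path and accepts a sources parameter analogous to the HTML converter we're about to use.

+
from haystack.preview.components.file_converters.txt import TextFileToDocument  # module path assumed
+from haystack.preview.document_stores import InMemoryDocumentStore
+
+converter = TextFileToDocument()
+# "sources" is assumed to mirror the interface of HTMLToDocument shown below
+documents = converter.run(sources=["my_file.txt"])["documents"]
+
+document_store = InMemoryDocumentStore()
+document_store.write_documents(documents)
+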

+

However, indexing pipelines often do more than this. They can process more than one file type, like .txt, .pdf, .docx, .html, audio, video, and images, so they route each file to the proper converter based on its type. Files tend to contain far more text than a normal LLM can chew, so the pipeline needs to split those huge Documents into smaller chunks. Also, the converters are not perfect at reading text from the files, so the data needs to be cleaned of artifacts such as page numbers, headers, footers, and so on. On top of all of this, if you plan to use a retriever that is based on embedding similarity, your indexing pipeline will also need to embed all documents before writing them into the store.

+

Sounds like a lot of work!

+

In this post, we will focus on the preprocessing part of the pipeline: cleaning, splitting, and writing documents. I will talk about the other functionalities of indexing pipelines, such as document embedding and multiple file types routing, in later posts.

+

+ Converting files

+

As we’ve just seen, the most important task of this pipeline is to convert files into Documents. Haystack provides several converters for this purpose: at the time of writing, it supports:

+
    +
  • Raw text files (TextFileToDocument)
  • +
  • HTML files, so web pages in general (HTMLToDocument)
  • +
  • PDF files, by extracting text natively (PyPDFToDocument)
  • +
  • Image files, PDFs with images, and Office files with images, by OCR (AzureOCRDocumentConverter)
  • +
  • Audio files, doing transcription with Whisper either locally (LocalWhisperTranscriber) or remotely using OpenAI’s hosted models (RemoteWhisperTranscriber)
  • +
  • A ton of other formats, such as Microsoft’s Office formats, thanks to Apache Tika (TikaDocumentConverter)
  • +
+

For this example, let’s assume we have a collection of web pages downloaded from the Internet. These pages are our only source of information and contain all we want our RAG application to know about.

+

In this case, our converter of choice is HTMLToDocument. HTMLToDocument is a Haystack component that understands HTML and can filter all the markup away, leaving only meaningful text. Remember that this is a file converter, not a URL fetcher: it can only process local files, such as a website crawl. Haystack provides some components to fetch web pages, but we will see them later.

+

Here is how you can use this converter:

+
from haystack.preview.components.file_converters.html import HTMLToDocument
+
+path = "Republic_of_Rose_Island.html"
+
+converter = HTMLToDocument()
+converter.run(sources=[path])
+
+# returns {"documents": [Document(content="The Republic of Rose Isla...")]}
+

HTMLToDocument is a straightforward component that offers close to no parameters to customize its behavior. One notable feature of its API is its input type: this converter can take paths to local files in the form of strings or Path objects, but it also accepts ByteStream objects.

+

ByteStream is a handy Haystack abstraction that makes handling binary streams easier. Because this converter accepts ByteStream as input, you don’t necessarily have to save your web pages to a file before passing them to it. Components that retrieve large files from the Internet can pipe their output directly into this converter without saving the data to disk first, which can save a lot of time.
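+
For example, here is a small sketch of feeding raw HTML bytes to the converter through a ByteStream instead of a path (I'm assuming ByteStream can be imported from haystack.preview.dataclasses, just like Document):

+
from haystack.preview.dataclasses import ByteStream  # import path assumed
+
+with open("Republic_of_Rose_Island.html", "rb") as html_file:
+    html_bytes = html_file.read()
+
+converter = HTMLToDocument()
+converter.run(sources=[ByteStream(data=html_bytes)])
+# returns {"documents": [Document(content="The Republic of Rose Isla...")]}
+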

+

+ Cleaning the text

+

With HTMLToDocument, we can convert whole web pages into large Document objects. The converter typically does a decent job of filtering out the markup. Still, it’s not always perfect. To compensate for these occasional issues, Haystack offers a component called DocumentCleaner that can remove noise from the text of the documents.

+

Just like any other component, DocumentCleaner is straightforward to use:

+
from haystack.preview.components.preprocessors.document_cleaner import DocumentCleaner
+
+cleaner = DocumentCleaner()
+cleaner.run(documents=documents)
+# returns {"documents": [Document(content=...), Document(content=...), ...]}
+

The effectiveness of DocumentCleaner depends a lot on the type of converter you use. Some flags, such as remove_empty_lines and remove_extra_whitespace, are minor fixes that can come in handy but usually have little impact on the quality of the results when used in a RAG pipeline. They can, however, make a vast difference for Extractive QA pipelines.

+

Other parameters, like remove_substrings or remove_regex, work very well but need manual inspection and iteration from a human to get right. For example, for Wikipedia pages, we could use these parameters to remove all instances of the word "Wikipedia", which are undoubtedly many and irrelevant.
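+
For example, a cleaner that strips every occurrence of the word "Wikipedia" from the documents would look like this:

+
cleaner = DocumentCleaner(remove_substrings=["Wikipedia"])
+cleaner.run(documents=documents)
+# returns {"documents": [...]} with every "Wikipedia" substring removed from the content
+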

+

Finally, remove_repeated_substrings is a convenient option that removes headers and footers from long text, for example, books and articles. However, it works only for PDFs and, to a limited degree, for text files, because it relies on the presence of form feed characters (\f), which are rarely present in web pages.

+

+ Splitting the text

+

Now that the text is cleaned up, we can move on to a more exciting step: text splitting.

+

So far, each Document stored the content of an entire file. If a file was a whole book with hundreds of pages, a single Document would contain hundreds of thousands of words, which is clearly too much for an LLM to make sense of. Such a large Document is also challenging for Retrievers to understand because it contains so much text that it looks relevant to every possible question. To populate our document store with data that can be used effectively by a RAG pipeline, we need to chunk this data into much smaller Documents.

+

That’s where TextDocumentSplitter comes into play.

+
+

💡 With LLMs in a race to offer the largest context window and research showing that such a chase is counterproductive, there is no general consensus about how splitting Documents for RAG impacts the LLM’s performance.

+

What you need to keep in mind is that splitting implies a tradeoff. Huge documents will always be slightly relevant for every question, but they will bring a lot of context, which may or may not confuse the model. On the other hand, tiny Documents are much more likely to be retrieved only for questions they’re highly relevant for, but they might provide too little context for the LLM to really understand their meaning.

+

Tweaking the size of your Documents for the specific LLM you’re using and the topic of your documents is one way to optimize your RAG pipeline, so be ready to experiment with different Document sizes before committing to one.

+
+ +

How is it used?

+
from haystack.preview.components.preprocessors.text_document_splitter import TextDocumentSplitter
+
+text_splitter = TextDocumentSplitter(split_by="sentence", split_length=5)
+text_splitter.run(documents=documents)
+
+# returns {"documents": [Document(content=...), Document(content=...), ...]}
+

TextDocumentSplitter lets you configure the approximate size of the chunks you want to generate with three parameters: split_by, split_length, and split_overlap.

+

split_by defines the unit to use when splitting some text. For now, the options are word, sentence, and passage (paragraph), but we will soon add other options.

+

split_length is the number of units (as defined above) that each document should include. For example, if the unit is sentence, split_length=10 means that all your Documents will contain 10 sentences’ worth of text (except, usually, for the last document, which may have fewer). If the unit was word, each would instead contain 10 words.

+

split_overlap is the number of units that should be repeated from the previous Document. For example, if the unit is sentence and the length is 10, setting split_overlap=2 means that the last two sentences of the first document will also be present at the start of the second, which will include only 8 new sentences for a total of 10. Such repetition carries over until the end of the text to split.
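+
For example, the configuration just described looks like this:

+
text_splitter = TextDocumentSplitter(split_by="sentence", split_length=10, split_overlap=2)
+text_splitter.run(documents=documents)
+# each resulting Document carries 10 sentences, and the first 2 sentences of each chunk
+# repeat the last 2 sentences of the previous one
+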

+

+ Writing to the store

+

Once all of this is done, we can finally move on to the last step of our journey: writing the Documents into our document store. We first create the document store:

+
from haystack.preview.document_stores import InMemoryDocumentStore
+
+document_store = InMemoryDocumentStore()
+

and then use DocumentWriter to actually write the documents in:

+
from haystack.preview.components.writers import DocumentWriter
+
+writer = DocumentWriter(document_store=document_store)
+writer.run(documents=documents)
+# returns {"documents_written": 120}
+

If you’ve read my previous post about RAG pipelines, you may wonder: why use DocumentWriter when we could call the .write_documents() method of our document store?

+

In fact, the two approaches are fully equivalent: DocumentWriter does nothing more than call the .write_documents() method of the document store. The difference is that DocumentWriter is the way to go if you are using a Pipeline, which is what we’re going to do next.
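+
To make the equivalence concrete, outside of a Pipeline these two snippets produce the same result:

+
# Using the component...
+writer = DocumentWriter(document_store=document_store)
+writer.run(documents=documents)
+
+# ...is the same as calling the document store directly:
+document_store.write_documents(documents=documents)
+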

+

+ Putting it all together

+

We finally have all the components we need to go from a list of web pages to a document store populated with clean and short Document objects. Let’s build a Pipeline to sum up this process:

+
from haystack.preview import Pipeline
+
+document_store = InMemoryDocumentStore()
+
+pipeline = Pipeline()
+pipeline.add_component("converter", HTMLToDocument())
+pipeline.add_component("cleaner", DocumentCleaner())
+pipeline.add_component("splitter", TextDocumentSplitter(split_by="sentence", split_length=5))
+pipeline.add_component("writer", DocumentWriter(document_store=document_store))
+pipeline.connect("converter", "cleaner")
+pipeline.connect("cleaner", "splitter")
+pipeline.connect("splitter", "writer")
+
+pipeline.draw("simple-indexing-pipeline.png")
+
+pipeline.run({"converter": {"sources": file_names}})
+

Indexing Pipeline

+

That’s it! We now have a fully functional indexing pipeline that can take a list of web pages and convert them into Documents that our RAG pipeline can use. As long as the RAG pipeline reads from the same store we are writing the Documents to, we can add as many Documents as we need to keep the chatbot’s answers up to date without having to touch the RAG pipeline.

+

To try it out, we only need to take the RAG pipeline we built in my previous post and connect it to the same document store we just populated:

+
from haystack.preview.components.generators.openai.gpt import GPTGenerator
+from haystack.preview.components.builders.prompt_builder import PromptBuilder
+from haystack.preview.components.retrievers.in_memory_bm25_retriever import InMemoryBM25Retriever
+
+template = """
+Given the following information, answer the question: {{ question }}
+
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+"""
+pipe = Pipeline()
+
+pipe.add_component("retriever", InMemoryBM25Retriever(document_store=document_store))
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("retriever", "prompt_builder.documents")
+pipe.connect("prompt_builder", "llm")
+
+question = "Is there any documentary about the story of Rose Island? Can you tell me something about that?"
+pipe.run({
+    "retriever": {"query": question},
+    "prompt_builder": {"question": question}
+})
+
+# returns {
+#     'llm': {
+#         'replies': [
+#             'Yes, there is a documentary about the story of Rose Island. It is 
+#             called "Rose Island" and was released on Netflix on 8 December 2020. 
+#             The documentary follows the true story of Giorgio Rosa, an Italian 
+#             engineer who built his own island in the Adriatic sea in the late 
+#             1960s. The island housed a restaurant, bar, souvenir shop, and even 
+#             a post office. Rosa\'s goal was to have his self-made structure 
+#             recognized as an independent state, leading to a battle with the 
+#             Italian authorities. The film depicts the construction of the island 
+#             and Rosa\'s refusal to dismantle it despite government demands. The 
+#             story of Rose Island was relatively unknown until the release of the 
+#             documentary. The film showcases the technology Rosa invented to build 
+#             the island and explores themes of freedom and resilience.'
+#         ],
+#         'metadata': [...]
+#     }
+# }
+

And suddenly, our chatbot knows everything about Rose Island without us having to feed the data to the document store by hand.

+

+ Wrapping up

+

Indexing pipelines can be powerful tools, even in their simplest form, like the one we just built. However, it doesn’t end here: Haystack offers many more facilities to extend what’s possible with indexing pipelines, like doing web searches, downloading files from the web, processing many other file types, and so on.

+

We will see how soon, so stay tuned!

+
+

Next: The World of Web RAG

+

Previous: RAG Pipelines from scratch

+

See the entire series here: Haystack 2.0 series

+

Cover image from this website.

+ +
+ + +
+ + +
+ + + + + +
+ + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-11-05-haystack-series-minimal-indexing/simple-indexing-pipeline.png b/posts/2023-11-05-haystack-series-minimal-indexing/simple-indexing-pipeline.png similarity index 100% rename from static/posts/2023-11-05-haystack-series-minimal-indexing/simple-indexing-pipeline.png rename to posts/2023-11-05-haystack-series-minimal-indexing/simple-indexing-pipeline.png diff --git a/static/posts/2023-11-09-haystack-series-simple-web-rag/bm25-rag-pipeline.png b/posts/2023-11-09-haystack-series-simple-web-rag/bm25-rag-pipeline.png similarity index 100% rename from static/posts/2023-11-09-haystack-series-simple-web-rag/bm25-rag-pipeline.png rename to posts/2023-11-09-haystack-series-simple-web-rag/bm25-rag-pipeline.png diff --git a/static/posts/2023-11-09-haystack-series-simple-web-rag/cover.jpeg b/posts/2023-11-09-haystack-series-simple-web-rag/cover.jpeg similarity index 100% rename from static/posts/2023-11-09-haystack-series-simple-web-rag/cover.jpeg rename to posts/2023-11-09-haystack-series-simple-web-rag/cover.jpeg diff --git a/static/posts/2023-11-09-haystack-series-simple-web-rag/html-web-rag-pipeline.png b/posts/2023-11-09-haystack-series-simple-web-rag/html-web-rag-pipeline.png similarity index 100% rename from static/posts/2023-11-09-haystack-series-simple-web-rag/html-web-rag-pipeline.png rename to posts/2023-11-09-haystack-series-simple-web-rag/html-web-rag-pipeline.png diff --git a/static/posts/2023-11-09-haystack-series-simple-web-rag/incorrect-web-rag-pipeline.png b/posts/2023-11-09-haystack-series-simple-web-rag/incorrect-web-rag-pipeline.png similarity index 100% rename from static/posts/2023-11-09-haystack-series-simple-web-rag/incorrect-web-rag-pipeline.png rename to posts/2023-11-09-haystack-series-simple-web-rag/incorrect-web-rag-pipeline.png diff --git a/posts/2023-11-09-haystack-series-simple-web-rag/index.html b/posts/2023-11-09-haystack-series-simple-web-rag/index.html new file mode 100644 index 00000000..d06a1916 --- /dev/null +++ b/posts/2023-11-09-haystack-series-simple-web-rag/index.html @@ -0,0 +1,629 @@ + + + + + + The World of Web RAG · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ The World of Web RAG

+
+ +
+ +
+ + Featured image + +

In an earlier post of the Haystack 2.0 series, we’ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on.

+

In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions.

+
+
💡 Do you want to see the code in action? Check out the Colab notebook or the gist.
+
+ +
+
⚠️ Warning: This code was tested on haystack-ai==0.130.0. Haystack 2.0 is still unstable, so later versions might introduce breaking changes without notice until Haystack 2.0 is officially released. The concepts and components, however, stay the same.
+
+ +

+ Searching the Web

+

As we’ve seen earlier, a Haystack RAG Pipeline is made of three components: a Retriever, a PromptBuilder, and a Generator, and looks like this:

+

BM25 RAG Pipeline

+

To make this pipeline use the Web as its data source, we need to replace the retriever with a component that does not look for information in a local document store but can search the web.

+

Haystack 2.0 already provides a search engine component called SerperDevWebSearch. It uses SerperDev’s API to query popular search engines and return two types of data: a list of text snippets coming from the search engine’s preview boxes and a list of links, which point to the top search results.

+

To begin, let’s see how to use this component in isolation.

+
from haystack.preview.components.websearch import SerperDevWebSearch
+
+question = "What's the official language of the Republic of Rose Island?"
+
+search = SerperDevWebSearch(api_key=serperdev_api_key)
+results = search.run(query=question)
+# returns {
+#     "documents": [
+#         Document(content='Esperanto', meta={'title': 'Republic of Rose Island - Wikipedia', 'link': 'https://en.wikipedia.org/wiki/Republic_of_Rose_Island'}),
+#         Document(content="The Republic of Rose Island was a short-lived micronation on a man-made platform in the Adriatic Sea. It's a story that few people knew of until recently, ...", meta={'title': 'Rose Island - The story of a micronation', 'link': 'https://www.rose-island.co/', 'imageUrl': 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQiRCfTO6OwFS32SX37S-7OadDZCNK6Fy_NZVGsci2gcIS-zcinhOcGhgU&s', 'position': 1},
+#         ...
+#     ], 
+#     "links": [
+#         'https://www.rose-island.co/',
+#         'https://www.defactoborders.org/places/rose-island',
+#         ...
+#     ]
+# }
+

SerperDevWebSearch is a component with a simple interface. Looking at its output, we can see that it returns not one but two different values in the output dictionary: documents and links.

+

links is the most straightforward and represents the top results that Google found relevant for the input query. It’s a list of strings, each containing a URL. You can configure the number of links to return with the top_k init parameter.

+

documents instead is a list of already fully formed Document objects. The content of these objects corresponds to the “answer boxes” that Google often returns together with its search results. Given that these snippets are usually clean and short pieces of text, they’re perfect to be fed directly to an LLM without further processing.

+

Other than expecting an API key as an init parameter and top_k to control the number of results, SerperDevWebSearch also accepts an allowed_domains parameter, which lets you configure the domains Google is allowed to look into during search, and search_params, a more generic dictionary input that lets you pass any additional search parameter SerperDev’s API understands.
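+
For example, a search component restricted to Wikipedia might be set up like this (the exact keys accepted by search_params depend on SerperDev's API, so treat the one below as an assumption):

+
search = SerperDevWebSearch(
+    api_key=serperdev_api_key,
+    top_k=5,                               # how many links and documents to return
+    allowed_domains=["en.wikipedia.org"],  # restrict the search to these domains
+    search_params={"hl": "en"},            # extra parameters forwarded to SerperDev's API (key assumed)
+)
+search.run(query="What's the official language of the Republic of Rose Island?")
+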

+

+ A Minimal Web RAG Pipeline

+

SerperDevWebSearch is actually the bare minimum we need to be able to build our very first Web RAG Pipeline. All we need to do is replace our original example’s Retriever with our search component.

+

This is the result:

+
from haystack.preview import Pipeline
+from haystack.preview.components.builders import PromptBuilder
+from haystack.preview.components.generators import GPTGenerator
+
+template = """
+Question: {{ question }}
+
+Google Search Answer Boxes:
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+
+Please reformulate the information above to 
+answer the user's question.
+"""
+pipe = Pipeline()
+
+pipe.add_component("search", SerperDevWebSearch(api_key=serperdev_api_key))
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("search.documents", "prompt_builder.documents")
+pipe.connect("prompt_builder", "llm")
+
+question = "What's the official language of the Republic of Rose Island?"
+pipe.run({
+    "search": {"query": question},
+    "prompt_builder": {"question": question}
+})
+# returns {
+#     'llm': {
+#         'replies': [
+#             "The official language of the Republic of Rose Island is Esperanto. This artificial language was chosen by the residents of Rose Island as their national language when they declared independence in 1968. However, it's important to note that despite having their own language, government, currency, and postal service, Rose Island was never officially recognized as an independent nation by any country."
+#         ],
+#         'metadata': [...]
+#     }
+# }
+

Minimal Web RAG Pipeline

+

This solution is already quite effective for simple questions because Google does most of the heavy lifting of reading the content of the top results, extracting the relevant snippets, and packaging them up in a way that is easy for the model to access and understand.

+

However, there are situations in which this approach is not sufficient. For example, for highly technical or nuanced questions, the answer box does not provide enough context for the LLM to elaborate and grasp the entire scope of the discussion. In these situations, we may need to turn to the second output of SerperDevWebSearch: the links.

+

+ Fetching URLs

+

Haystack offers a component to read the content of a URL: LinkContentFetcher. Let’s see this component in action.

+
from haystack.preview.components.fetchers.link_content import LinkContentFetcher
+
+fetcher = LinkContentFetcher()
+fetcher.run(urls=["https://en.wikipedia.org/wiki/Republic_of_Rose_Island"])
+# returns {
+#     "streams": [
+#         ByteStream(data=b"<DOCTYPE html>\n<...")
+#     ]
+# }
+

First, let’s notice that LinkContentFetcher outputs a list of ByteStream objects. ByteStream is a Haystack abstraction that makes handling binary streams and files equally easy. When a component produces ByteStream as output, you can pass these objects directly to a Converter component that can extract their textual content without saving the binary content to a file first.

+

These features come in handy to connect LinkContentFetcher to a component we’ve already met before: HTMLToDocument.
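+
For example, here is a minimal sketch that pipes the fetcher's output straight into the converter without ever touching the disk:

+
fetcher = LinkContentFetcher()
+converter = HTMLToDocument()
+
+streams = fetcher.run(urls=["https://en.wikipedia.org/wiki/Republic_of_Rose_Island"])["streams"]
+converter.run(sources=streams)
+# returns {"documents": [Document(content="The Republic of Rose Isla...")]}
+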

+

+ Processing the page

+

In a previous post, we’ve seen how Haystack can convert web pages into clean Documents ready to be stored in a Document Store. We will reuse many of the components we have discussed there, so if you missed it, make sure to check it out.

+

From the pipeline in question, we’re interested in three of its components: HTMLToDocument, DocumentCleaner, and DocumentSplitter. Once the search component returns the links and LinkContentFetcher downloads their content, we can connect the fetcher to HTMLToDocument to extract the text, and then to DocumentCleaner and DocumentSplitter to clean and chunk the content, respectively. These documents can then go to the PromptBuilder, resulting in a pipeline such as this:

+
template = """
+Question: {{ question }}
+
+Context:
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+
+Please reformulate the information above to answer the user's question.
+"""
+pipe = Pipeline()
+
+pipe.add_component("search", SerperDevWebSearch(api_key=serperdev_api_key))
+pipe.add_component("fetcher", LinkContentFetcher())
+pipe.add_component("converter", HTMLToDocument())
+pipe.add_component("cleaner", DocumentCleaner())
+pipe.add_component("splitter", DocumentSplitter(split_by="sentence", split_length=3))
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("search.links", "fetcher")
+pipe.connect("fetcher", "converter")
+pipe.connect("converter", "cleaner")
+pipe.connect("cleaner", "splitter")
+pipe.connect("splitter", "prompt_builder.documents")
+pipe.connect("prompt_builder", "llm")
+
+question = "What's the official language of the Republic of Rose Island?"
+pipe.run({
+    "search": {"query": question},
+    "prompt_builder": {"question": question}
+})
+

Incorrect Web RAG Pipeline

+

However, running this pipeline results in a crash.

+
PipelineRuntimeError: llm raised 'InvalidRequestError: This model's maximum context 
+length is 4097 tokens. However, your messages resulted in 4911 tokens. Please reduce 
+the length of the messages.'
+

Reading the error message reveals the issue right away: the LLM received too much text. And that’s to be expected because we just passed the entire content of several web pages to it.

+

We need to find a way to filter only the most relevant documents from the long list that is generated by DocumentSplitter.

+

+ Ranking Documents on the fly

+

Retrievers are optimized to use the efficient retrieval engines of document stores to sift quickly through vast collections of Documents. However, Haystack also provides smaller, standalone components that work very well on shorter lists and don’t require a full-blown vector database engine to function.

+

These components are called rankers. One example of such a component is TransformersSimilarityRanker: a ranker that uses a model from the transformers library to rank Documents by their similarity to a given query.

+

Let’s see how it works:

+
from haystack.preview.components.rankers.transformers_similarity import TransformersSimilarityRanker
+
+ranker = TransformersSimilarityRanker()
+ranker.warm_up()
+ranker.run(
+    query="What's the official language of the Republic of Rose Island?",
+    documents=documents,
+    top_k=1
+  )
+# returns {
+#     'documents': [
+#         Document(content="Island under construction\nRepublic of Rose Island\nThe Republic of Rose Island ( Esperanto : Respubliko de la Insulo de la Rozoj; Italian : Repubblica dell'Isola delle Rose) was a short-lived micronation on a man-made platform in the Adriatic Sea , 11 kilometres (6.8\xa0mi) off the coast of the province of Rimini , Italy, built by Italian engineer Giorgio Rosa, who made himself its president and declared it an independent state on 1 May 1968. [1] [2] Rose Island had its own government, currency, post office, and commercial establishments, and the official language was Esperanto .", meta={'source_id': '03bfe5f7b7a7ec623e854d2bc5eb36ba3cdf06e1e2771b3a529eeb7e669431b6'}, score=7.594357490539551)
+#     ]
+# }
+

This component has a feature we haven’t encountered before: the warm_up() method.

+

Components that need to load heavy resources, such as a language model, do so in the warm_up() method rather than at initialization time. When they are used in a Pipeline, Pipeline.run() takes care of calling warm_up() on all components before running; when used standalone, users need to call warm_up() explicitly to prepare the object to run.

+

TransformersSimilarityRanker accepts a few parameters. When initialized, it accepts a model_name_or_path with the HuggingFace ID of the model to use for ranking: this value defaults to cross-encoder/ms-marco-MiniLM-L-6-v2. It also takes token, to allow users to download private models from the Models Hub, device, to let them leverage PyTorch’s ability to select the hardware to run on, and top_k, the maximum number of documents to return. top_k, as we see above, can also be passed to run(), and the run-time value overrides the init value if both are set. This value defaults to 10.
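+
For example, a ranker configured explicitly with these parameters could look like this (the device string format is an assumption on my side):

+
ranker = TransformersSimilarityRanker(
+    model_name_or_path="cross-encoder/ms-marco-MiniLM-L-6-v2",  # the default model
+    device="cpu",  # assumption: a PyTorch-style device string
+    top_k=3,
+)
+ranker.warm_up()
+ranker.run(query="What's the official language of the Republic of Rose Island?", documents=documents, top_k=1)
+# the run-time top_k=1 overrides the top_k=3 set at init time
+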

+

Let’s also put this component in the pipeline: its place is between the splitter and the prompt builder.

+
template = """
+Question: {{ question }}
+
+Context:
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+
+Please reformulate the information above to answer the user's question.
+"""
+pipe = Pipeline()
+
+pipe.add_component("search", SerperDevWebSearch(api_key=serperdev_api_key))
+pipe.add_component("fetcher", LinkContentFetcher())
+pipe.add_component("converter", HTMLToDocument())
+pipe.add_component("cleaner", DocumentCleaner())
+pipe.add_component("splitter", DocumentSplitter(split_by="sentence", split_length=3))
+pipe.add_component("ranker", TransformersSimilarityRanker())
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("search.links", "fetcher")
+pipe.connect("fetcher", "converter")
+pipe.connect("converter", "cleaner")
+pipe.connect("cleaner", "splitter")
+pipe.connect("splitter", "ranker")
+pipe.connect("ranker", "prompt_builder.documents")
+pipe.connect("prompt_builder", "llm")
+
+question = "What's the official language of the Republic of Rose Island?"
+
+pipe.run({
+    "search": {"query": question},
+    "ranker": {"query": question},
+    "prompt_builder": {"question": question}
+})
+# returns {
+#     'llm': {
+#         'replies': [
+#             'The official language of the Republic of Rose Island was Esperanto.'
+#         ],
+#         'metadata': [...]
+#     }
+# }
+

Unfiltered Web RAG Pipeline

+

Note how the ranker needs to know the question to compare the documents, just like the search and prompt builder components do. So, we need to pass the value to the pipeline’s run() call.

+

+ Filtering file types

+

The pipeline we just built works great in most cases. However, it may occasionally fail if the search component happens to return some URL that does not point to a web page but, for example, directly to a video, a PDF, or a PPTX.

+

Haystack does offer some facilities to deal with these file types, but we will see these converters in another post. For now, let’s only filter those links out to prevent HTMLToDocument from crashing.

+

This task could be approached with Haystack in several ways, but the simplest in this scenario is to use a component that would typically be used for a slightly different purpose. This component is called FileTypeRouter.

+

FileTypeRouter is designed to route different files to their appropriate converters by checking their mime type. It does so by inspecting the content or the extension of the files it receives as input and producing an output dictionary with a separate list for each identified type.

+

However, we can also conveniently use this component as a filter. Let’s see how!

+
from haystack.preview.components.routers.file_type_router import FileTypeRouter
+
+router = FileTypeRouter(mime_types=["text/html"])
+router.run(sources=["Republic_of_Rose_Island.txt", "Republic_of_Rose_Island.html"])
+# returns defaultdict(list,
+#         {'unclassified': [PosixPath('Republic_of_Rose_Island.txt')],
+#          'text/html': [PosixPath('Republic_of_Rose_Island.html')]})
+

FileTypeRouter must always be initialized with the list of mime types it is supposed to handle. Not only that, but this component can also deal with files that do not match any of the expected mime types by putting them all under the unclassified category.

+

By putting this component between LinkContentFetcher and HTMLToDocument, we can make it forward along the pipeline only the files that match the text/html mime type and silently discard all others.

+

Notice how, in the pipeline below, I explicitly connect the text/html output only:

+
template = """
+Question: {{ question }}
+
+Google Search Answer Boxes:
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+
+Please reformulate the information above to answer the user's question.
+"""
+pipe = Pipeline()
+
+pipe.add_component("search", SerperDevWebSearch(api_key=serperdev_api_key))
+pipe.add_component("fetcher", LinkContentFetcher())
+pipe.add_component("filter", FileTypeRouter(mime_types=["text/html"]))
+pipe.add_component("converter", HTMLToDocument())
+pipe.add_component("cleaner", DocumentCleaner())
+pipe.add_component("splitter", DocumentSplitter(split_by="sentence", split_length=3))
+pipe.add_component("ranker", TransformersSimilarityRanker())
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", GPTGenerator(api_key=api_key))
+pipe.connect("search.links", "fetcher")
+pipe.connect("fetcher", "filter")
+pipe.connect("filter.text/html", "converter")
+pipe.connect("converter", "cleaner")
+pipe.connect("cleaner", "splitter")
+pipe.connect("splitter", "ranker")
+pipe.connect("ranker", "prompt_builder.documents")
+pipe.connect("prompt_builder", "llm")
+
+question = "What's the official language of the Republic of Rose Island?"
+
+pipe.run({
+    "search": {"query": question},
+    "ranker": {"query": question},
+    "prompt_builder": {"question": question}
+})
+# returns {
+#     'llm': {
+#         'replies': [
+#             'The official language of the Republic of Rose Island was Esperanto.'
+#         ],
+#         'metadata': [...]
+#     }
+# }
+

HTML-only Web RAG Pipeline

+

With this last addition, we added quite a bit of robustness to our pipeline, making it less likely to fail.

+

+ Wrapping up

+

Web RAG is a pattern that can be expanded to cover many use cases, resulting in very complex pipelines. Haystack helps make sense of their complexity with pipeline graphs and detailed error messages in case of mismatched connections. However, pipelines this large can become overwhelming, especially when more branches are added.

+

In one of our next posts, we will see how to cover such use cases while keeping the resulting complexity as low as possible.

+
+

Next: Soon!

+

Previous: Indexing data for RAG applications

+

See the entire series here: Haystack 2.0 series

+

Cover image from Wikipedia

+ +
+ + +
+ + +
+ + + + + +
+ + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/posts/2023-11-09-haystack-series-simple-web-rag/minimal-web-rag-pipeline.png b/posts/2023-11-09-haystack-series-simple-web-rag/minimal-web-rag-pipeline.png similarity index 100% rename from static/posts/2023-11-09-haystack-series-simple-web-rag/minimal-web-rag-pipeline.png rename to posts/2023-11-09-haystack-series-simple-web-rag/minimal-web-rag-pipeline.png diff --git a/static/posts/2023-11-09-haystack-series-simple-web-rag/unfiltered-web-rag-pipeline.png b/posts/2023-11-09-haystack-series-simple-web-rag/unfiltered-web-rag-pipeline.png similarity index 100% rename from static/posts/2023-11-09-haystack-series-simple-web-rag/unfiltered-web-rag-pipeline.png rename to posts/2023-11-09-haystack-series-simple-web-rag/unfiltered-web-rag-pipeline.png diff --git a/posts/index.html b/posts/index.html new file mode 100644 index 00000000..7c4832ba --- /dev/null +++ b/posts/index.html @@ -0,0 +1,264 @@ + + + + + + Posts · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Posts + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/posts/index.xml b/posts/index.xml new file mode 100644 index 00000000..a9abe7b8 --- /dev/null +++ b/posts/index.xml @@ -0,0 +1,92 @@ + + + + Posts on Sara Zan + https://www.zansara.dev/posts/ + Recent content in Posts on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. 
+ + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + Why rewriting Haystack?! + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first step outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of a Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + My Dotfiles + https://www.zansara.dev/posts/2021-12-11-dotfiles/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/posts/2021-12-11-dotfiles/ + GitHub Repo: https://github.com/ZanSara/dotfiles +What Linux developer would I be if I didn&rsquo;t also have my very own dotfiles repo? 
+After many years of iterations I finally found a combination that lasted quite a while, so I figured it&rsquo;s time to treat them as a real project. It was originally optimized for my laptop, but then I realized it works quite well on my three-monitor desk setup as well without major issues. + + + diff --git a/posts/page/1/index.html b/posts/page/1/index.html new file mode 100644 index 00000000..1238a547 --- /dev/null +++ b/posts/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/posts/ + + + + + + diff --git a/projects/booking-system/index.html b/projects/booking-system/index.html new file mode 100644 index 00000000..fe4c9d30 --- /dev/null +++ b/projects/booking-system/index.html @@ -0,0 +1,294 @@ + + + + + + CAI Sovico's Website · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ CAI Sovico's Website

+
+ +
+ +
+ + Featured image + +

Main website: https://www.caisovico.it

+
+

Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business.

+

The website went through several iterations as web technologies evolved, as did the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. It also includes an FTP server that supports a couple of ZanzoCams and a weather monitoring station.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/projects/camerini.png b/projects/camerini.png similarity index 100% rename from static/projects/camerini.png rename to projects/camerini.png diff --git a/static/projects/haystack-2.0.png b/projects/haystack-2.0.png similarity index 100% rename from static/projects/haystack-2.0.png rename to projects/haystack-2.0.png diff --git a/projects/index.html b/projects/index.html new file mode 100644 index 00000000..1fff9970 --- /dev/null +++ b/projects/index.html @@ -0,0 +1,246 @@ + + + + + + Projects · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Projects + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/projects/index.xml b/projects/index.xml new file mode 100644 index 00000000..92461a52 --- /dev/null +++ b/projects/index.xml @@ -0,0 +1,30 @@ + + + + Projects on Sara Zan + https://www.zansara.dev/projects/ + Recent content in Projects on Sara Zan + Hugo -- gohugo.io + en + Wed, 01 Jan 2020 00:00:00 +0000 + + + ZanzoCam + https://www.zansara.dev/projects/zanzocam/ + Wed, 01 Jan 2020 00:00:00 +0000 + https://www.zansara.dev/projects/zanzocam/ + Main website: https://zanzocam.github.io/ +ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices on their affiliate huts. +ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/projects/page/1/index.html b/projects/page/1/index.html new file mode 100644 index 00000000..9f1ef2b8 --- /dev/null +++ b/projects/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/projects/ + + + + + + diff --git a/static/projects/zanzocam.png b/projects/zanzocam.png similarity index 100% rename from static/projects/zanzocam.png rename to projects/zanzocam.png diff --git a/projects/zanzocam/index.html b/projects/zanzocam/index.html new file mode 100644 index 00000000..4651f00e --- /dev/null +++ b/projects/zanzocam/index.html @@ -0,0 +1,291 @@ + + + + + + ZanzoCam · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + ZanzoCam + +

+
+ +
+ +
+ + Featured image + +

Main website: https://zanzocam.github.io/

+
+

ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other member responsible for the hardware design. CAI later deployed several of these devices at their affiliated huts.

+

ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. The camera software also improves on the basic capabilities of picamera to take pictures in dark conditions, making ZanzoCams able to shoot good pictures for a few hours after sunset.
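The real implementation lives in the ZanzoCam repository; purely as an illustration of what such a retry-and-fallback upload strategy can look like (every name and parameter below is invented for this sketch and is not ZanzoCam's actual API), a minimal version could be:

```python
# Illustrative sketch only, not ZanzoCam's actual code: upload one picture with
# simple retries over HTTP, falling back to FTP when so configured.
import ftplib
import time

import requests


def upload_picture(path: str, config: dict, retries: int = 3) -> bool:
    """Try to upload one picture according to the remotely fetched configuration."""
    for attempt in range(retries):
        try:
            if config["protocol"] == "http":
                with open(path, "rb") as picture:
                    response = requests.post(config["url"], files={"picture": picture}, timeout=60)
                response.raise_for_status()
            else:  # assume FTP otherwise
                with ftplib.FTP(config["host"], config["user"], config["password"]) as ftp:
                    with open(path, "rb") as picture:
                        ftp.storbinary(f"STOR {config['remote_name']}", picture)
            return True
        except (OSError, EOFError, ftplib.Error, requests.RequestException):
            time.sleep(2 ** attempt)  # back off a little before the next attempt
    return False
```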

+

The camera is highly configurable: photo size and frequency, server address and protocol, all the overlays (color, size, position, text and images) and several other parameters can be configured remotely without the need to expose any ports of the device to the internet. The cameras work reliably without the need for a VPN and at the same time are quite secure by design.
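As a concrete but entirely hypothetical illustration of the kind of parameters involved (the field names below are invented for this sketch and do not reflect ZanzoCam's real configuration schema), such a remotely fetched configuration could look like this:

```python
# Hypothetical remote configuration, fetched from the web panel before each shot.
# Field names are invented for this example; see the ZanzoCam repo for the real schema.
remote_config = {
    "photo": {"width": 1920, "height": 1080, "frequency_minutes": 30},
    "server": {
        "protocol": "http",
        "url": "https://example.org/upload",
    },
    "overlays": [
        {"type": "text", "position": "bottom-left", "text": "Rifugio M. Del Grande - R. Camerini"},
        {"type": "image", "position": "top-right", "path": "logo.png"},
    ],
}

# The upload sketch above would then be driven by the "server" section:
# upload_picture("latest.jpg", remote_config["server"])
```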

+

ZanzoCams mostly serve CAI and the hut managers for self-promotion, and help hikers and climbers assess the local conditions before attempting a hike. Pictures taken for these purposes are sent to RifugiLombardia, and you can see many of them on this page.

+

However, ZanzoCams have also been used by glaciologists to monitor glacier conditions, outlook and extent over the years. Here you can see their webcams, some of which are ZanzoCams.

+

Here is the latest picture from Rifugio M. Del Grande - R. Camerini, the test location for the original prototype:

+

ZanzoCam of Rifugio M. Del Grande - R. Camerini

+

And here is one of the cameras serving a local glaciology research group, Servizio Glaciologico Lombardo:

+

ZanzoCam of M. Disgrazia

+

Both of these cameras are fully solar-powered.

+

ZanzoCam is fully open-source: check the GitHub repo. Thanks to the decision to open-source the project, I was invited by Università di Pavia to hold a lecture about it as part of their “Hardware and Software Codesign” course. Check out the slides of the lecture here.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/publications/index.html b/publications/index.html new file mode 100644 index 00000000..7f6500ab --- /dev/null +++ b/publications/index.html @@ -0,0 +1,244 @@ + + + + + + Publications · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + + + +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/publications/index.xml b/publications/index.xml new file mode 100644 index 00000000..4ff18fab --- /dev/null +++ b/publications/index.xml @@ -0,0 +1,42 @@ + + + + Publications on Sara Zan + https://www.zansara.dev/publications/ + Recent content in Publications on Sara Zan + Hugo -- gohugo.io + en + Tue, 01 Mar 2022 00:00:00 +0000 + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF) + https://www.zansara.dev/publications/thpv042/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/publications/thpv042/ + Abstract Link to heading The CERN accelerators require a large number of instruments, measuring different beam parameters like position, losses, current etc. The instruments’ associated electronics and software also produce information about their status. All these data are stored in a database for later analysis. The Beam Instrumentation group developed the Offline Analysis Framework some years ago to regularly and systematically analyze these data. The framework has been successfully used for nearly 100 different analyses that ran regularly by the end of the LHC run 2. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. 
+ + + diff --git a/static/publications/msc-thesis.pdf b/publications/msc-thesis.pdf similarity index 100% rename from static/publications/msc-thesis.pdf rename to publications/msc-thesis.pdf diff --git a/static/publications/msc-thesis.png b/publications/msc-thesis.png similarity index 100% rename from static/publications/msc-thesis.png rename to publications/msc-thesis.png diff --git a/publications/msc-thesis/index.html b/publications/msc-thesis/index.html new file mode 100644 index 00000000..1facde9d --- /dev/null +++ b/publications/msc-thesis/index.html @@ -0,0 +1,304 @@ + + + + + + Evaluation of Qt as GUI Framework for Accelerator Controls · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Evaluation of Qt as GUI Framework for Accelerator Controls + +

+
+ +
+ +
+ + Featured image + +

This is the full text of my MSc thesis, written in collaboration with +Politecnico di Milano and CERN.

+
+

Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls

+

Publisher’s entry: 10589/144860.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/publications/page/1/index.html b/publications/page/1/index.html new file mode 100644 index 00000000..2c0b4a27 --- /dev/null +++ b/publications/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/publications/ + + + + + + diff --git a/static/publications/thpv014-poster.pdf b/publications/thpv014-poster.pdf similarity index 100% rename from static/publications/thpv014-poster.pdf rename to publications/thpv014-poster.pdf diff --git a/static/publications/thpv014.pdf b/publications/thpv014.pdf similarity index 100% rename from static/publications/thpv014.pdf rename to publications/thpv014.pdf diff --git a/static/publications/thpv014.png b/publications/thpv014.png similarity index 100% rename from static/publications/thpv014.png rename to publications/thpv014.png diff --git a/publications/thpv014/index.html b/publications/thpv014/index.html new file mode 100644 index 00000000..972dfff5 --- /dev/null +++ b/publications/thpv014/index.html @@ -0,0 +1,305 @@ + + + + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Adopting PyQt For Beam Instrumentation GUI Development At CERN + +

+
+ +
+ +
+ + Featured image + +

+ Abstract + + + Link to heading + +

+

As Java GUI toolkits become deprecated, the Beam Instrumentation (BI) group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java-oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. To conclude, the challenges we anticipate for the BI GUI developer community in adopting this new technology are also discussed.

+
+

Get the full text here: Adopting PyQt For Beam Instrumentation GUI Development At CERN

+

Get the poster: PDF

+

Publisher’s entry: THPV014.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/publications/thpv042.pdf b/publications/thpv042.pdf similarity index 100% rename from static/publications/thpv042.pdf rename to publications/thpv042.pdf diff --git a/static/publications/thpv042.png b/publications/thpv042.png similarity index 100% rename from static/publications/thpv042.png rename to publications/thpv042.png diff --git a/publications/thpv042/index.html b/publications/thpv042/index.html new file mode 100644 index 00000000..759d429a --- /dev/null +++ b/publications/thpv042/index.html @@ -0,0 +1,292 @@ + + + + + + Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF) · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF) + +

+
+ +
+ +
+ + Featured image + +

+ Abstract + + + Link to heading + +

+

The CERN accelerators require a large number of instruments, measuring different beam parameters like position, losses, current, etc. The instruments’ associated electronics and software also produce information about their status. All these data are stored in a database for later analysis. The Beam Instrumentation group developed the Offline Analysis Framework some years ago to regularly and systematically analyze these data. The framework has been successfully used for nearly 100 different analyses that ran regularly by the end of the LHC run 2. Currently it is being updated for run 3 with modern and efficient tools to improve its usability and data analysis power. In particular, the architecture has been reviewed to have a modular design to facilitate the maintenance and the future evolution of the tool. A new web-based application is being developed to facilitate the users’ access both to online configuration and to results. This paper will describe all these evolutions and outline possible lines of work for further improvements.

+
+

Get the full text here: Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF)

+

Publisher’s entry: THPV042.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/publications/tucpr03.pdf b/publications/tucpr03.pdf similarity index 100% rename from static/publications/tucpr03.pdf rename to publications/tucpr03.pdf diff --git a/static/publications/tucpr03.png b/publications/tucpr03.png similarity index 100% rename from static/publications/tucpr03.png rename to publications/tucpr03.png diff --git a/publications/tucpr03/index.html b/publications/tucpr03/index.html new file mode 100644 index 00000000..4076e786 --- /dev/null +++ b/publications/tucpr03/index.html @@ -0,0 +1,304 @@ + + + + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + +

+
+ +
+ +
+ + Featured image + +

+ Abstract + + + Link to heading + +

+

For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. This was a wake-up call for us. We took the opportunity to reconsider all technical options for developing operational GUIs. Our options ranged from sticking with JavaFX, through using the Qt framework (either using PyQt or developing our own Java bindings to Qt), to using Web technology both in a browser and in native desktop applications. This article explains the reasons for moving away from Java as the main GUI technology and describes the analysis and hands-on evaluations that we went through before choosing the replacement.

+
+

Get the full text here: Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs

+

Publisher’s entry: TUCPR03.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/series/haystack-2.0-series/index.html b/series/haystack-2.0-series/index.html new file mode 100644 index 00000000..d845350c --- /dev/null +++ b/series/haystack-2.0-series/index.html @@ -0,0 +1,256 @@ + + + + + + Haystack 2.0 Series · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Haystack 2.0 Series + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/series/haystack-2.0-series/index.xml b/series/haystack-2.0-series/index.xml new file mode 100644 index 00000000..2114b625 --- /dev/null +++ b/series/haystack-2.0-series/index.xml @@ -0,0 +1,72 @@ + + + + Haystack 2.0 Series on Sara Zan + https://www.zansara.dev/series/haystack-2.0-series/ + Recent content in Haystack 2.0 Series on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. 
+ + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + Why rewriting Haystack?! + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first step outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of a Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + diff --git a/series/haystack-2.0-series/page/1/index.html b/series/haystack-2.0-series/page/1/index.html new file mode 100644 index 00000000..6c3659f2 --- /dev/null +++ b/series/haystack-2.0-series/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/series/haystack-2.0-series/ + + + + + + diff --git a/series/index.html b/series/index.html new file mode 100644 index 00000000..28841250 --- /dev/null +++ b/series/index.html @@ -0,0 +1,231 @@ + + + + + Series · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ Series +

+
+ + +
+ + +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/series/index.xml b/series/index.xml new file mode 100644 index 00000000..04ce78fb --- /dev/null +++ b/series/index.xml @@ -0,0 +1,19 @@ + + + + Series on Sara Zan + https://www.zansara.dev/series/ + Recent content in Series on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + Haystack 2.0 Series + https://www.zansara.dev/series/haystack-2.0-series/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/series/haystack-2.0-series/ + + + + diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..04576ecc --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,238 @@ + + + + https://www.zansara.dev/tags/gpt/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/haystack/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/haystack-2.0/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/series/haystack-2.0-series/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/indexing/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/llm/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/nlp/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/posts/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/python/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/rag/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/retrieval-augmentation/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/series/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/web/ + 2023-11-09T00:00:00+00:00 + + https://www.zansara.dev/tags/document-store/ + 2023-11-05T00:00:00+00:00 + + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + 2023-11-05T00:00:00+00:00 + + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + 2023-10-27T00:00:00+00:00 + + https://www.zansara.dev/tags/semantic-search/ + 2023-10-27T00:00:00+00:00 + + https://www.zansara.dev/tags/api-design/ + 2023-10-26T00:00:00+00:00 + + https://www.zansara.dev/tags/canals/ + 2023-10-26T00:00:00+00:00 + + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + 2023-10-26T00:00:00+00:00 + + https://www.zansara.dev/tags/dag/ + 2023-10-26T00:00:00+00:00 + + https://www.zansara.dev/tags/pipeline/ + 2023-10-26T00:00:00+00:00 + + https://www.zansara.dev/tags/graph/ + 2023-10-15T00:00:00+00:00 + + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + 2023-10-15T00:00:00+00:00 + + https://www.zansara.dev/tags/hybrid-retrieval/ + 2023-10-15T00:00:00+00:00 + + https://www.zansara.dev/tags/ai/ + 2023-10-12T00:00:00+00:00 + + https://www.zansara.dev/tags/office-hours/ + 2023-10-12T00:00:00+00:00 + + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + 2023-10-12T00:00:00+00:00 + + https://www.zansara.dev/talks/ + 2023-10-12T00:00:00+00:00 + + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + 2023-10-11T00:00:00+00:00 + + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + 2023-10-10T00:00:00+00:00 + + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + 2023-09-10T00:00:00+00:00 + + https://www.zansara.dev/tags/api/ + 2023-09-10T00:00:00+00:00 + + https://www.zansara.dev/tags/conjugations/ + 2023-09-10T00:00:00+00:00 + + 
https://www.zansara.dev/tags/flashcards/ + 2023-09-10T00:00:00+00:00 + + https://www.zansara.dev/tags/languages/ + 2023-09-10T00:00:00+00:00 + + https://www.zansara.dev/tags/linux/ + 2023-09-10T00:00:00+00:00 + + https://www.zansara.dev/tags/sdk/ + 2023-09-10T00:00:00+00:00 + + https://www.zansara.dev/tags/verbix/ + 2023-09-10T00:00:00+00:00 + + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + 2023-08-03T00:00:00+00:00 + + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + 2022-12-01T00:00:00+00:00 + + https://www.zansara.dev/tags/colab/ + 2022-12-01T00:00:00+00:00 + + https://www.zansara.dev/tags/images/ + 2022-12-01T00:00:00+00:00 + + https://www.zansara.dev/tags/multi-modality/ + 2022-12-01T00:00:00+00:00 + + https://www.zansara.dev/tags/opennlp-meetup/ + 2022-12-01T00:00:00+00:00 + + https://www.zansara.dev/tags/retrieval/ + 2022-12-01T00:00:00+00:00 + + https://www.zansara.dev/tags/text-to-image/ + 2022-12-01T00:00:00+00:00 + + https://www.zansara.dev/publications/thpv014/ + 2022-03-01T00:00:00+00:00 + + https://www.zansara.dev/tags/cern/ + 2022-03-01T00:00:00+00:00 + + https://www.zansara.dev/tags/gui/ + 2022-03-01T00:00:00+00:00 + + https://www.zansara.dev/tags/javafx/ + 2022-03-01T00:00:00+00:00 + + https://www.zansara.dev/tags/physics/ + 2022-03-01T00:00:00+00:00 + + https://www.zansara.dev/publications/ + 2022-03-01T00:00:00+00:00 + + https://www.zansara.dev/tags/pyqt/ + 2022-03-01T00:00:00+00:00 + + https://www.zansara.dev/tags/qt/ + 2022-03-01T00:00:00+00:00 + + https://www.zansara.dev/tags/data-science/ + 2021-12-11T00:00:00+00:00 + + https://www.zansara.dev/publications/thpv042/ + 2021-12-11T00:00:00+00:00 + + https://www.zansara.dev/posts/2021-12-11-dotfiles/ + 2021-12-11T00:00:00+00:00 + + https://www.zansara.dev/tags/cai/ + 2021-05-24T00:00:00+00:00 + + https://www.zansara.dev/tags/hiking/ + 2021-05-24T00:00:00+00:00 + + https://www.zansara.dev/tags/raspberry-pi/ + 2021-05-24T00:00:00+00:00 + + https://www.zansara.dev/tags/zanzocam/ + 2021-05-24T00:00:00+00:00 + + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + 2021-05-24T00:00:00+00:00 + + https://www.zansara.dev/publications/tucpr03/ + 2020-08-30T00:00:00+00:00 + + https://www.zansara.dev/projects/ + 2020-01-01T00:00:00+00:00 + + https://www.zansara.dev/projects/zanzocam/ + 2020-01-01T00:00:00+00:00 + + https://www.zansara.dev/publications/msc-thesis/ + 2018-12-20T00:00:00+00:00 + + https://www.zansara.dev/projects/booking-system/ + 2016-01-01T00:00:00+00:00 + + https://www.zansara.dev/tags/css/ + 2016-01-01T00:00:00+00:00 + + https://www.zansara.dev/tags/html/ + 2016-01-01T00:00:00+00:00 + + https://www.zansara.dev/tags/javascript/ + 2016-01-01T00:00:00+00:00 + + https://www.zansara.dev/tags/mysql/ + 2016-01-01T00:00:00+00:00 + + https://www.zansara.dev/tags/php/ + 2016-01-01T00:00:00+00:00 + + https://www.zansara.dev/tags/php-8/ + 2016-01-01T00:00:00+00:00 + + https://www.zansara.dev/about/ + + diff --git a/tags/ai/index.html b/tags/ai/index.html new file mode 100644 index 00000000..0f923a64 --- /dev/null +++ b/tags/ai/index.html @@ -0,0 +1,248 @@ + + + + + + AI · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + AI + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/ai/index.xml b/tags/ai/index.xml new file mode 100644 index 00000000..47ffd0bb --- /dev/null +++ b/tags/ai/index.xml @@ -0,0 +1,64 @@ + + + + AI on Sara Zan + https://www.zansara.dev/tags/ai/ + Recent content in AI on Sara Zan + Hugo -- gohugo.io + en + Thu, 12 Oct 2023 00:00:00 +0000 + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to this date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Why rewriting Haystack?! + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first step outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of a Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise at the presence of a preview module in the latest Haystack 1. 
+ + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/ai/page/1/index.html b/tags/ai/page/1/index.html new file mode 100644 index 00000000..696cedd9 --- /dev/null +++ b/tags/ai/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/ai/ + + + + + + diff --git a/tags/api-design/index.html b/tags/api-design/index.html new file mode 100644 index 00000000..1d564ae5 --- /dev/null +++ b/tags/api-design/index.html @@ -0,0 +1,236 @@ + + + + + + API Design · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + API Design + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/api-design/index.xml b/tags/api-design/index.xml new file mode 100644 index 00000000..bd5329b8 --- /dev/null +++ b/tags/api-design/index.xml @@ -0,0 +1,28 @@ + + + + API Design on Sara Zan + https://www.zansara.dev/tags/api-design/ + Recent content in API Design on Sara Zan + Hugo -- gohugo.io + en + Thu, 26 Oct 2023 00:00:00 +0000 + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. + + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + diff --git a/tags/api-design/page/1/index.html b/tags/api-design/page/1/index.html new file mode 100644 index 00000000..60710514 --- /dev/null +++ b/tags/api-design/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/api-design/ + + + + + + diff --git a/tags/api/index.html b/tags/api/index.html new file mode 100644 index 00000000..ad724863 --- /dev/null +++ b/tags/api/index.html @@ -0,0 +1,232 @@ + + + + + + API · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + API + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/api/index.xml b/tags/api/index.xml new file mode 100644 index 00000000..a95e3bc8 --- /dev/null +++ b/tags/api/index.xml @@ -0,0 +1,23 @@ + + + + API on Sara Zan + https://www.zansara.dev/tags/api/ + Recent content in API on Sara Zan + Hugo -- gohugo.io + en + Sun, 10 Sep 2023 00:00:00 +0000 + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + diff --git a/tags/api/page/1/index.html b/tags/api/page/1/index.html new file mode 100644 index 00000000..08b65de5 --- /dev/null +++ b/tags/api/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/api/ + + + + + + diff --git a/tags/cai/index.html b/tags/cai/index.html new file mode 100644 index 00000000..d94a7aad --- /dev/null +++ b/tags/cai/index.html @@ -0,0 +1,250 @@ + + + + + + CAI · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + CAI + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/cai/index.xml b/tags/cai/index.xml new file mode 100644 index 00000000..4c46696f --- /dev/null +++ b/tags/cai/index.xml @@ -0,0 +1,39 @@ + + + + CAI on Sara Zan + https://www.zansara.dev/tags/cai/ + Recent content in CAI on Sara Zan + Hugo -- gohugo.io + en + Mon, 24 May 2021 00:00:00 +0000 + + + ZanzoCam: An open-source alpine web camera + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Slides: ZanzoCam: An open-source alpine web camera +On May 24th 2021 I held a talk about the ZanzoCam project as invited speaker for the &ldquo;Hardware and Software Codesign&rdquo; course at Università di Pavia. +The slides go through the entire lifecycle of the ZanzoCam project, from the very inception of it, the market research, our decision process, earlier prototypes, and then goes into a more detailed explanation of the the design and implementation of the project from a hardware and software perspective, with some notes about our financial situation and project management. + + + ZanzoCam + https://www.zansara.dev/projects/zanzocam/ + Wed, 01 Jan 2020 00:00:00 +0000 + https://www.zansara.dev/projects/zanzocam/ + Main website: https://zanzocam.github.io/ +ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices on their affiliate huts. +ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/tags/cai/page/1/index.html b/tags/cai/page/1/index.html new file mode 100644 index 00000000..4b04e806 --- /dev/null +++ b/tags/cai/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/cai/ + + + + + + diff --git a/tags/canals/index.html b/tags/canals/index.html new file mode 100644 index 00000000..8b85fefb --- /dev/null +++ b/tags/canals/index.html @@ -0,0 +1,232 @@ + + + + + + Canals · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Canals + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/canals/index.xml b/tags/canals/index.xml new file mode 100644 index 00000000..85a735fe --- /dev/null +++ b/tags/canals/index.xml @@ -0,0 +1,20 @@ + + + + Canals on Sara Zan + https://www.zansara.dev/tags/canals/ + Recent content in Canals on Sara Zan + Hugo -- gohugo.io + en + Thu, 26 Oct 2023 00:00:00 +0000 + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. + + + diff --git a/tags/canals/page/1/index.html b/tags/canals/page/1/index.html new file mode 100644 index 00000000..a0d66b0f --- /dev/null +++ b/tags/canals/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/canals/ + + + + + + diff --git a/tags/cern/index.html b/tags/cern/index.html new file mode 100644 index 00000000..a5fa55ff --- /dev/null +++ b/tags/cern/index.html @@ -0,0 +1,244 @@ + + + + + + CERN · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + + + +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/cern/index.xml b/tags/cern/index.xml new file mode 100644 index 00000000..a33f40f5 --- /dev/null +++ b/tags/cern/index.xml @@ -0,0 +1,42 @@ + + + + CERN on Sara Zan + https://www.zansara.dev/tags/cern/ + Recent content in CERN on Sara Zan + Hugo -- gohugo.io + en + Tue, 01 Mar 2022 00:00:00 +0000 + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF) + https://www.zansara.dev/publications/thpv042/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/publications/thpv042/ + Abstract Link to heading The CERN accelerators require a large number of instruments, measuring different beam parameters like position, losses, current etc. The instruments’ associated electronics and software also produce information about their status. All these data are stored in a database for later analysis. The Beam Instrumentation group developed the Offline Analysis Framework some years ago to regularly and systematically analyze these data. The framework has been successfully used for nearly 100 different analyses that ran regularly by the end of the LHC run 2. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. 
+ + + diff --git a/tags/cern/page/1/index.html b/tags/cern/page/1/index.html new file mode 100644 index 00000000..b4c4112e --- /dev/null +++ b/tags/cern/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/cern/ + + + + + + diff --git a/tags/colab/index.html b/tags/colab/index.html new file mode 100644 index 00000000..13f21e7c --- /dev/null +++ b/tags/colab/index.html @@ -0,0 +1,232 @@ + + + + + + Colab · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Colab + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/colab/index.xml b/tags/colab/index.xml new file mode 100644 index 00000000..456583bb --- /dev/null +++ b/tags/colab/index.xml @@ -0,0 +1,24 @@ + + + + Colab on Sara Zan + https://www.zansara.dev/tags/colab/ + Recent content in Colab on Sara Zan + Hugo -- gohugo.io + en + Thu, 01 Dec 2022 00:00:00 +0000 + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/colab/page/1/index.html b/tags/colab/page/1/index.html new file mode 100644 index 00000000..c6a93131 --- /dev/null +++ b/tags/colab/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/colab/ + + + + + + diff --git a/tags/conjugations/index.html b/tags/conjugations/index.html new file mode 100644 index 00000000..65b41436 --- /dev/null +++ b/tags/conjugations/index.html @@ -0,0 +1,232 @@ + + + + + + Conjugations · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Conjugations + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/conjugations/index.xml b/tags/conjugations/index.xml new file mode 100644 index 00000000..7a71129f --- /dev/null +++ b/tags/conjugations/index.xml @@ -0,0 +1,23 @@ + + + + Conjugations on Sara Zan + https://www.zansara.dev/tags/conjugations/ + Recent content in Conjugations on Sara Zan + Hugo -- gohugo.io + en + Sun, 10 Sep 2023 00:00:00 +0000 + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + diff --git a/tags/conjugations/page/1/index.html b/tags/conjugations/page/1/index.html new file mode 100644 index 00000000..76ce002b --- /dev/null +++ b/tags/conjugations/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/conjugations/ + + + + + + diff --git a/tags/css/index.html b/tags/css/index.html new file mode 100644 index 00000000..21e8f094 --- /dev/null +++ b/tags/css/index.html @@ -0,0 +1,237 @@ + + + + + + CSS · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + CSS + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/css/index.xml b/tags/css/index.xml new file mode 100644 index 00000000..9685c7ff --- /dev/null +++ b/tags/css/index.xml @@ -0,0 +1,21 @@ + + + + CSS on Sara Zan + https://www.zansara.dev/tags/css/ + Recent content in CSS on Sara Zan + Hugo -- gohugo.io + en + Fri, 01 Jan 2016 00:00:00 +0000 + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/tags/css/page/1/index.html b/tags/css/page/1/index.html new file mode 100644 index 00000000..857836d8 --- /dev/null +++ b/tags/css/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/css/ + + + + + + diff --git a/tags/dag/index.html b/tags/dag/index.html new file mode 100644 index 00000000..d33dafcd --- /dev/null +++ b/tags/dag/index.html @@ -0,0 +1,236 @@ + + + + + + DAG · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + DAG + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/dag/index.xml b/tags/dag/index.xml new file mode 100644 index 00000000..96d76769 --- /dev/null +++ b/tags/dag/index.xml @@ -0,0 +1,28 @@ + + + + DAG on Sara Zan + https://www.zansara.dev/tags/dag/ + Recent content in DAG on Sara Zan + Hugo -- gohugo.io + en + Thu, 26 Oct 2023 00:00:00 +0000 + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. + + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + diff --git a/tags/dag/page/1/index.html b/tags/dag/page/1/index.html new file mode 100644 index 00000000..85e35dbc --- /dev/null +++ b/tags/dag/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/dag/ + + + + + + diff --git a/tags/data-science/index.html b/tags/data-science/index.html new file mode 100644 index 00000000..b2d5b71f --- /dev/null +++ b/tags/data-science/index.html @@ -0,0 +1,232 @@ + + + + + + Data Science · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Data Science + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/data-science/index.xml b/tags/data-science/index.xml new file mode 100644 index 00000000..62bd5098 --- /dev/null +++ b/tags/data-science/index.xml @@ -0,0 +1,19 @@ + + + + Data Science on Sara Zan + https://www.zansara.dev/tags/data-science/ + Recent content in Data Science on Sara Zan + Hugo -- gohugo.io + en + Sat, 11 Dec 2021 00:00:00 +0000 + + + Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF) + https://www.zansara.dev/publications/thpv042/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/publications/thpv042/ + Abstract Link to heading The CERN accelerators require a large number of instruments, measuring different beam parameters like position, losses, current etc. The instruments’ associated electronics and software also produce information about their status. All these data are stored in a database for later analysis. The Beam Instrumentation group developed the Offline Analysis Framework some years ago to regularly and systematically analyze these data. The framework has been successfully used for nearly 100 different analyses that ran regularly by the end of the LHC run 2. + + + diff --git a/tags/data-science/page/1/index.html b/tags/data-science/page/1/index.html new file mode 100644 index 00000000..5c917768 --- /dev/null +++ b/tags/data-science/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/data-science/ + + + + + + diff --git a/tags/document-store/index.html b/tags/document-store/index.html new file mode 100644 index 00000000..9f13b520 --- /dev/null +++ b/tags/document-store/index.html @@ -0,0 +1,232 @@ + + + + + + Document Store · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Document Store + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/document-store/index.xml b/tags/document-store/index.xml new file mode 100644 index 00000000..bf82cb7f --- /dev/null +++ b/tags/document-store/index.xml @@ -0,0 +1,20 @@ + + + + Document Store on Sara Zan + https://www.zansara.dev/tags/document-store/ + Recent content in Document Store on Sara Zan + Hugo -- gohugo.io + en + Sun, 05 Nov 2023 00:00:00 +0000 + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + diff --git a/tags/document-store/page/1/index.html b/tags/document-store/page/1/index.html new file mode 100644 index 00000000..20c94bec --- /dev/null +++ b/tags/document-store/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/document-store/ + + + + + + diff --git a/tags/flashcards/index.html b/tags/flashcards/index.html new file mode 100644 index 00000000..543464d2 --- /dev/null +++ b/tags/flashcards/index.html @@ -0,0 +1,232 @@ + + + + + + Flashcards · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Flashcards + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/flashcards/index.xml b/tags/flashcards/index.xml new file mode 100644 index 00000000..3c7beedc --- /dev/null +++ b/tags/flashcards/index.xml @@ -0,0 +1,23 @@ + + + + Flashcards on Sara Zan + https://www.zansara.dev/tags/flashcards/ + Recent content in Flashcards on Sara Zan + Hugo -- gohugo.io + en + Sun, 10 Sep 2023 00:00:00 +0000 + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + diff --git a/tags/flashcards/page/1/index.html b/tags/flashcards/page/1/index.html new file mode 100644 index 00000000..0ce07688 --- /dev/null +++ b/tags/flashcards/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/flashcards/ + + + + + + diff --git a/tags/gpt/index.html b/tags/gpt/index.html new file mode 100644 index 00000000..a403a557 --- /dev/null +++ b/tags/gpt/index.html @@ -0,0 +1,236 @@ + + + + + + GPT · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + GPT + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/gpt/index.xml b/tags/gpt/index.xml new file mode 100644 index 00000000..6a02a826 --- /dev/null +++ b/tags/gpt/index.xml @@ -0,0 +1,29 @@ + + + + GPT on Sara Zan + https://www.zansara.dev/tags/gpt/ + Recent content in GPT on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + diff --git a/tags/gpt/page/1/index.html b/tags/gpt/page/1/index.html new file mode 100644 index 00000000..02af4e18 --- /dev/null +++ b/tags/gpt/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/gpt/ + + + + + + diff --git a/tags/graph/index.html b/tags/graph/index.html new file mode 100644 index 00000000..47f9785f --- /dev/null +++ b/tags/graph/index.html @@ -0,0 +1,232 @@ + + + + + + graph · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + graph + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/graph/index.xml b/tags/graph/index.xml new file mode 100644 index 00000000..88b7a23a --- /dev/null +++ b/tags/graph/index.xml @@ -0,0 +1,20 @@ + + + + graph on Sara Zan + https://www.zansara.dev/tags/graph/ + Recent content in graph on Sara Zan + Hugo -- gohugo.io + en + Sun, 15 Oct 2023 00:00:00 +0000 + + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + diff --git a/tags/graph/page/1/index.html b/tags/graph/page/1/index.html new file mode 100644 index 00000000..88c753d6 --- /dev/null +++ b/tags/graph/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/graph/ + + + + + + diff --git a/tags/gui/index.html b/tags/gui/index.html new file mode 100644 index 00000000..93f5f383 --- /dev/null +++ b/tags/gui/index.html @@ -0,0 +1,240 @@ + + + + + + GUI · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + + + +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/gui/index.xml b/tags/gui/index.xml new file mode 100644 index 00000000..1cee0d97 --- /dev/null +++ b/tags/gui/index.xml @@ -0,0 +1,35 @@ + + + + GUI on Sara Zan + https://www.zansara.dev/tags/gui/ + Recent content in GUI on Sara Zan + Hugo -- gohugo.io + en + Tue, 01 Mar 2022 00:00:00 +0000 + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. + + + diff --git a/tags/gui/page/1/index.html b/tags/gui/page/1/index.html new file mode 100644 index 00000000..0e50688f --- /dev/null +++ b/tags/gui/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/gui/ + + + + + + diff --git a/tags/haystack-2.0/index.html b/tags/haystack-2.0/index.html new file mode 100644 index 00000000..781f021b --- /dev/null +++ b/tags/haystack-2.0/index.html @@ -0,0 +1,264 @@ + + + + + + Haystack 2.0 · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Haystack 2.0 + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/haystack-2.0/index.xml b/tags/haystack-2.0/index.xml new file mode 100644 index 00000000..d092967e --- /dev/null +++ b/tags/haystack-2.0/index.xml @@ -0,0 +1,93 @@ + + + + Haystack 2.0 on Sara Zan + https://www.zansara.dev/tags/haystack-2.0/ + Recent content in Haystack 2.0 on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. 
+ + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to this date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Why rewriting Haystack?! + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first step outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of a Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. 
As rumors started to arise at the presence of a preview module in the latest Haystack 1. + + + diff --git a/tags/haystack-2.0/page/1/index.html b/tags/haystack-2.0/page/1/index.html new file mode 100644 index 00000000..4bddf117 --- /dev/null +++ b/tags/haystack-2.0/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/haystack-2.0/ + + + + + + diff --git a/tags/haystack/index.html b/tags/haystack/index.html new file mode 100644 index 00000000..055ca99a --- /dev/null +++ b/tags/haystack/index.html @@ -0,0 +1,268 @@ + + + + + + Haystack · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Haystack + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/haystack/index.xml b/tags/haystack/index.xml new file mode 100644 index 00000000..ab9c4193 --- /dev/null +++ b/tags/haystack/index.xml @@ -0,0 +1,105 @@ + + + + Haystack on Sara Zan + https://www.zansara.dev/tags/haystack/ + Recent content in Haystack on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. 
+ + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to this date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Why rewriting Haystack?! + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first step outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of a Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. 
As rumors started to arise at the presence of a preview module in the latest Haystack 1. + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/haystack/page/1/index.html b/tags/haystack/page/1/index.html new file mode 100644 index 00000000..e0c594a8 --- /dev/null +++ b/tags/haystack/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/haystack/ + + + + + + diff --git a/tags/hiking/index.html b/tags/hiking/index.html new file mode 100644 index 00000000..31c21ad1 --- /dev/null +++ b/tags/hiking/index.html @@ -0,0 +1,250 @@ + + + + + + Hiking · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Hiking + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/hiking/index.xml b/tags/hiking/index.xml new file mode 100644 index 00000000..4465164b --- /dev/null +++ b/tags/hiking/index.xml @@ -0,0 +1,39 @@ + + + + Hiking on Sara Zan + https://www.zansara.dev/tags/hiking/ + Recent content in Hiking on Sara Zan + Hugo -- gohugo.io + en + Mon, 24 May 2021 00:00:00 +0000 + + + ZanzoCam: An open-source alpine web camera + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Slides: ZanzoCam: An open-source alpine web camera +On May 24th 2021 I held a talk about the ZanzoCam project as invited speaker for the &ldquo;Hardware and Software Codesign&rdquo; course at Università di Pavia. +The slides go through the entire lifecycle of the ZanzoCam project, from the very inception of it, the market research, our decision process, earlier prototypes, and then goes into a more detailed explanation of the the design and implementation of the project from a hardware and software perspective, with some notes about our financial situation and project management. + + + ZanzoCam + https://www.zansara.dev/projects/zanzocam/ + Wed, 01 Jan 2020 00:00:00 +0000 + https://www.zansara.dev/projects/zanzocam/ + Main website: https://zanzocam.github.io/ +ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices on their affiliate huts. +ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/tags/hiking/page/1/index.html b/tags/hiking/page/1/index.html new file mode 100644 index 00000000..5af8530e --- /dev/null +++ b/tags/hiking/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/hiking/ + + + + + + diff --git a/tags/html/index.html b/tags/html/index.html new file mode 100644 index 00000000..c8e56bdd --- /dev/null +++ b/tags/html/index.html @@ -0,0 +1,237 @@ + + + + + + HTML · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + HTML + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/html/index.xml b/tags/html/index.xml new file mode 100644 index 00000000..f89636a3 --- /dev/null +++ b/tags/html/index.xml @@ -0,0 +1,21 @@ + + + + HTML on Sara Zan + https://www.zansara.dev/tags/html/ + Recent content in HTML on Sara Zan + Hugo -- gohugo.io + en + Fri, 01 Jan 2016 00:00:00 +0000 + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/tags/html/page/1/index.html b/tags/html/page/1/index.html new file mode 100644 index 00000000..1eeb7dd9 --- /dev/null +++ b/tags/html/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/html/ + + + + + + diff --git a/tags/hybrid-retrieval/index.html b/tags/hybrid-retrieval/index.html new file mode 100644 index 00000000..cf276c6d --- /dev/null +++ b/tags/hybrid-retrieval/index.html @@ -0,0 +1,232 @@ + + + + + + Hybrid Retrieval · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Hybrid Retrieval + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/hybrid-retrieval/index.xml b/tags/hybrid-retrieval/index.xml new file mode 100644 index 00000000..41d4453e --- /dev/null +++ b/tags/hybrid-retrieval/index.xml @@ -0,0 +1,20 @@ + + + + Hybrid Retrieval on Sara Zan + https://www.zansara.dev/tags/hybrid-retrieval/ + Recent content in Hybrid Retrieval on Sara Zan + Hugo -- gohugo.io + en + Sun, 15 Oct 2023 00:00:00 +0000 + + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + diff --git a/tags/hybrid-retrieval/page/1/index.html b/tags/hybrid-retrieval/page/1/index.html new file mode 100644 index 00000000..84af2f26 --- /dev/null +++ b/tags/hybrid-retrieval/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/hybrid-retrieval/ + + + + + + diff --git a/tags/images/index.html b/tags/images/index.html new file mode 100644 index 00000000..6d3418ad --- /dev/null +++ b/tags/images/index.html @@ -0,0 +1,232 @@ + + + + + + Images · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Images + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/images/index.xml b/tags/images/index.xml new file mode 100644 index 00000000..72a953cd --- /dev/null +++ b/tags/images/index.xml @@ -0,0 +1,24 @@ + + + + Images on Sara Zan + https://www.zansara.dev/tags/images/ + Recent content in Images on Sara Zan + Hugo -- gohugo.io + en + Thu, 01 Dec 2022 00:00:00 +0000 + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/images/page/1/index.html b/tags/images/page/1/index.html new file mode 100644 index 00000000..1fc8a6a0 --- /dev/null +++ b/tags/images/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/images/ + + + + + + diff --git a/tags/index.html b/tags/index.html new file mode 100644 index 00000000..784ee574 --- /dev/null +++ b/tags/index.html @@ -0,0 +1,770 @@ + + + + + Tags · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ Tags +

+
+ + +
+ + +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/index.xml b/tags/index.xml new file mode 100644 index 00000000..3fcff544 --- /dev/null +++ b/tags/index.xml @@ -0,0 +1,362 @@ + + + + Tags on Sara Zan + https://www.zansara.dev/tags/ + Recent content in Tags on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + GPT + https://www.zansara.dev/tags/gpt/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/gpt/ + + + + Haystack + https://www.zansara.dev/tags/haystack/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/haystack/ + + + + Haystack 2.0 + https://www.zansara.dev/tags/haystack-2.0/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/haystack-2.0/ + + + + indexing + https://www.zansara.dev/tags/indexing/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/indexing/ + + + + LLM + https://www.zansara.dev/tags/llm/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/llm/ + + + + NLP + https://www.zansara.dev/tags/nlp/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/nlp/ + + + + Python + https://www.zansara.dev/tags/python/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/python/ + + + + RAG + https://www.zansara.dev/tags/rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/rag/ + + + + Retrieval Augmentation + https://www.zansara.dev/tags/retrieval-augmentation/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/retrieval-augmentation/ + + + + Web + https://www.zansara.dev/tags/web/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/web/ + + + + Document Store + https://www.zansara.dev/tags/document-store/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/tags/document-store/ + + + + Semantic Search + https://www.zansara.dev/tags/semantic-search/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/semantic-search/ + + + + API Design + https://www.zansara.dev/tags/api-design/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/api-design/ + + + + Canals + https://www.zansara.dev/tags/canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/canals/ + + + + DAG + https://www.zansara.dev/tags/dag/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/dag/ + + + + Pipeline + https://www.zansara.dev/tags/pipeline/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/pipeline/ + + + + graph + https://www.zansara.dev/tags/graph/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/graph/ + + + + Hybrid Retrieval + https://www.zansara.dev/tags/hybrid-retrieval/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/hybrid-retrieval/ + + + + AI + https://www.zansara.dev/tags/ai/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/ai/ + + + + Office Hours + https://www.zansara.dev/tags/office-hours/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/tags/office-hours/ + + + + API + https://www.zansara.dev/tags/api/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/tags/api/ + + + + Conjugations + https://www.zansara.dev/tags/conjugations/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/tags/conjugations/ + + + + Flashcards + https://www.zansara.dev/tags/flashcards/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/tags/flashcards/ + + + + Languages + https://www.zansara.dev/tags/languages/ + Sun, 10 Sep 2023 00:00:00 +0000 + 
https://www.zansara.dev/tags/languages/ + + + + Linux + https://www.zansara.dev/tags/linux/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/tags/linux/ + + + + SDK + https://www.zansara.dev/tags/sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/tags/sdk/ + + + + Verbix + https://www.zansara.dev/tags/verbix/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/tags/verbix/ + + + + Colab + https://www.zansara.dev/tags/colab/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/tags/colab/ + + + + Images + https://www.zansara.dev/tags/images/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/tags/images/ + + + + Multi Modality + https://www.zansara.dev/tags/multi-modality/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/tags/multi-modality/ + + + + OpenNLP Meetup + https://www.zansara.dev/tags/opennlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/tags/opennlp-meetup/ + + + + Retrieval + https://www.zansara.dev/tags/retrieval/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/tags/retrieval/ + + + + Text to Image + https://www.zansara.dev/tags/text-to-image/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/tags/text-to-image/ + + + + CERN + https://www.zansara.dev/tags/cern/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/tags/cern/ + + + + GUI + https://www.zansara.dev/tags/gui/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/tags/gui/ + + + + JavaFX + https://www.zansara.dev/tags/javafx/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/tags/javafx/ + + + + Physics + https://www.zansara.dev/tags/physics/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/tags/physics/ + + + + PyQt + https://www.zansara.dev/tags/pyqt/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/tags/pyqt/ + + + + Qt + https://www.zansara.dev/tags/qt/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/tags/qt/ + + + + Data Science + https://www.zansara.dev/tags/data-science/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/tags/data-science/ + + + + CAI + https://www.zansara.dev/tags/cai/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/tags/cai/ + + + + Hiking + https://www.zansara.dev/tags/hiking/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/tags/hiking/ + + + + Raspberry Pi + https://www.zansara.dev/tags/raspberry-pi/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/tags/raspberry-pi/ + + + + ZanzoCam + https://www.zansara.dev/tags/zanzocam/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/tags/zanzocam/ + + + + CSS + https://www.zansara.dev/tags/css/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/tags/css/ + + + + HTML + https://www.zansara.dev/tags/html/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/tags/html/ + + + + JavaScript + https://www.zansara.dev/tags/javascript/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/tags/javascript/ + + + + MySQL + https://www.zansara.dev/tags/mysql/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/tags/mysql/ + + + + PHP + https://www.zansara.dev/tags/php/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/tags/php/ + + + + PHP 8 + https://www.zansara.dev/tags/php-8/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/tags/php-8/ + + + + diff --git a/tags/indexing/index.html b/tags/indexing/index.html new file mode 100644 index 00000000..875f3537 --- /dev/null +++ b/tags/indexing/index.html 
@@ -0,0 +1,236 @@ + + + + + + indexing · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + indexing + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/indexing/index.xml b/tags/indexing/index.xml new file mode 100644 index 00000000..7638488c --- /dev/null +++ b/tags/indexing/index.xml @@ -0,0 +1,28 @@ + + + + indexing on Sara Zan + https://www.zansara.dev/tags/indexing/ + Recent content in indexing on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + diff --git a/tags/indexing/page/1/index.html b/tags/indexing/page/1/index.html new file mode 100644 index 00000000..5d854542 --- /dev/null +++ b/tags/indexing/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/indexing/ + + + + + + diff --git a/tags/javafx/index.html b/tags/javafx/index.html new file mode 100644 index 00000000..34a13709 --- /dev/null +++ b/tags/javafx/index.html @@ -0,0 +1,240 @@ + + + + + + JavaFX · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + + + +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/javafx/index.xml b/tags/javafx/index.xml new file mode 100644 index 00000000..72ec50bb --- /dev/null +++ b/tags/javafx/index.xml @@ -0,0 +1,35 @@ + + + + JavaFX on Sara Zan + https://www.zansara.dev/tags/javafx/ + Recent content in JavaFX on Sara Zan + Hugo -- gohugo.io + en + Tue, 01 Mar 2022 00:00:00 +0000 + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. + + + diff --git a/tags/javafx/page/1/index.html b/tags/javafx/page/1/index.html new file mode 100644 index 00000000..1cebeb88 --- /dev/null +++ b/tags/javafx/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/javafx/ + + + + + + diff --git a/tags/javascript/index.html b/tags/javascript/index.html new file mode 100644 index 00000000..ae38b3e7 --- /dev/null +++ b/tags/javascript/index.html @@ -0,0 +1,237 @@ + + + + + + JavaScript · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + JavaScript + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/javascript/index.xml b/tags/javascript/index.xml new file mode 100644 index 00000000..ac54adb2 --- /dev/null +++ b/tags/javascript/index.xml @@ -0,0 +1,21 @@ + + + + JavaScript on Sara Zan + https://www.zansara.dev/tags/javascript/ + Recent content in JavaScript on Sara Zan + Hugo -- gohugo.io + en + Fri, 01 Jan 2016 00:00:00 +0000 + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/tags/javascript/page/1/index.html b/tags/javascript/page/1/index.html new file mode 100644 index 00000000..62f18fa6 --- /dev/null +++ b/tags/javascript/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/javascript/ + + + + + + diff --git a/tags/languages/index.html b/tags/languages/index.html new file mode 100644 index 00000000..f784e071 --- /dev/null +++ b/tags/languages/index.html @@ -0,0 +1,232 @@ + + + + + + Languages · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Languages + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/languages/index.xml b/tags/languages/index.xml new file mode 100644 index 00000000..30f6f477 --- /dev/null +++ b/tags/languages/index.xml @@ -0,0 +1,23 @@ + + + + Languages on Sara Zan + https://www.zansara.dev/tags/languages/ + Recent content in Languages on Sara Zan + Hugo -- gohugo.io + en + Sun, 10 Sep 2023 00:00:00 +0000 + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + diff --git a/tags/languages/page/1/index.html b/tags/languages/page/1/index.html new file mode 100644 index 00000000..b8fc96f3 --- /dev/null +++ b/tags/languages/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/languages/ + + + + + + diff --git a/tags/linux/index.html b/tags/linux/index.html new file mode 100644 index 00000000..541b401e --- /dev/null +++ b/tags/linux/index.html @@ -0,0 +1,236 @@ + + + + + + Linux · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Linux + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/linux/index.xml b/tags/linux/index.xml new file mode 100644 index 00000000..156f58c2 --- /dev/null +++ b/tags/linux/index.xml @@ -0,0 +1,32 @@ + + + + Linux on Sara Zan + https://www.zansara.dev/tags/linux/ + Recent content in Linux on Sara Zan + Hugo -- gohugo.io + en + Sun, 10 Sep 2023 00:00:00 +0000 + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + My Dotfiles + https://www.zansara.dev/posts/2021-12-11-dotfiles/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/posts/2021-12-11-dotfiles/ + GitHub Repo: https://github.com/ZanSara/dotfiles +What Linux developer would I be if I didn&rsquo;t also have my very own dotfiles repo? +After many years of iterations I finally found a combination that lasted quite a while, so I figured it&rsquo;s time to treat them as a real project. It was originally optimized for my laptop, but then I realized it works quite well on my three-monitor desk setup as well without major issues. + + + diff --git a/tags/linux/page/1/index.html b/tags/linux/page/1/index.html new file mode 100644 index 00000000..ec5ba00d --- /dev/null +++ b/tags/linux/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/linux/ + + + + + + diff --git a/tags/llm/index.html b/tags/llm/index.html new file mode 100644 index 00000000..1fbc94b1 --- /dev/null +++ b/tags/llm/index.html @@ -0,0 +1,260 @@ + + + + + + LLM · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + LLM + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/llm/index.xml b/tags/llm/index.xml new file mode 100644 index 00000000..fff41294 --- /dev/null +++ b/tags/llm/index.xml @@ -0,0 +1,89 @@ + + + + LLM on Sara Zan + https://www.zansara.dev/tags/llm/ + Recent content in LLM on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to this date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Why rewriting Haystack?! 
+ https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first step outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of a Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise at the presence of a preview module in the latest Haystack 1. + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/llm/page/1/index.html b/tags/llm/page/1/index.html new file mode 100644 index 00000000..9acb2de3 --- /dev/null +++ b/tags/llm/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/llm/ + + + + + + diff --git a/tags/multi-modality/index.html b/tags/multi-modality/index.html new file mode 100644 index 00000000..a42bbb90 --- /dev/null +++ b/tags/multi-modality/index.html @@ -0,0 +1,232 @@ + + + + + + Multi Modality · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Multi Modality + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/multi-modality/index.xml b/tags/multi-modality/index.xml new file mode 100644 index 00000000..e3436f5d --- /dev/null +++ b/tags/multi-modality/index.xml @@ -0,0 +1,24 @@ + + + + Multi Modality on Sara Zan + https://www.zansara.dev/tags/multi-modality/ + Recent content in Multi Modality on Sara Zan + Hugo -- gohugo.io + en + Thu, 01 Dec 2022 00:00:00 +0000 + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/multi-modality/page/1/index.html b/tags/multi-modality/page/1/index.html new file mode 100644 index 00000000..7c631b60 --- /dev/null +++ b/tags/multi-modality/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/multi-modality/ + + + + + + diff --git a/tags/mysql/index.html b/tags/mysql/index.html new file mode 100644 index 00000000..958dd3a5 --- /dev/null +++ b/tags/mysql/index.html @@ -0,0 +1,237 @@ + + + + + + MySQL · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + MySQL + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/mysql/index.xml b/tags/mysql/index.xml new file mode 100644 index 00000000..aeadcf3d --- /dev/null +++ b/tags/mysql/index.xml @@ -0,0 +1,21 @@ + + + + MySQL on Sara Zan + https://www.zansara.dev/tags/mysql/ + Recent content in MySQL on Sara Zan + Hugo -- gohugo.io + en + Fri, 01 Jan 2016 00:00:00 +0000 + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/tags/mysql/page/1/index.html b/tags/mysql/page/1/index.html new file mode 100644 index 00000000..fdabb92c --- /dev/null +++ b/tags/mysql/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/mysql/ + + + + + + diff --git a/tags/nlp/index.html b/tags/nlp/index.html new file mode 100644 index 00000000..40e2e9a1 --- /dev/null +++ b/tags/nlp/index.html @@ -0,0 +1,264 @@ + + + + + + NLP · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + NLP + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/nlp/index.xml b/tags/nlp/index.xml new file mode 100644 index 00000000..73fcdb20 --- /dev/null +++ b/tags/nlp/index.xml @@ -0,0 +1,97 @@ + + + + NLP on Sara Zan + https://www.zansara.dev/tags/nlp/ + Recent content in NLP on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. 
+ + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Why rewriting Haystack?! + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first steps outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts for what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise about the presence of a preview module in the latest Haystack 1. + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. 
Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/nlp/page/1/index.html b/tags/nlp/page/1/index.html new file mode 100644 index 00000000..314f519f --- /dev/null +++ b/tags/nlp/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/nlp/ + + + + + + diff --git a/tags/office-hours/index.html b/tags/office-hours/index.html new file mode 100644 index 00000000..ebc758b6 --- /dev/null +++ b/tags/office-hours/index.html @@ -0,0 +1,236 @@ + + + + + + Office Hours · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Office Hours + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/office-hours/index.xml b/tags/office-hours/index.xml new file mode 100644 index 00000000..c364a4db --- /dev/null +++ b/tags/office-hours/index.xml @@ -0,0 +1,33 @@ + + + + Office Hours on Sara Zan + https://www.zansara.dev/tags/office-hours/ + Recent content in Office Hours on Sara Zan + Hugo -- gohugo.io + en + Thu, 12 Oct 2023 00:00:00 +0000 + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to this date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise at the presence of a preview module in the latest Haystack 1. + + + diff --git a/tags/office-hours/page/1/index.html b/tags/office-hours/page/1/index.html new file mode 100644 index 00000000..f3a875e6 --- /dev/null +++ b/tags/office-hours/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/office-hours/ + + + + + + diff --git a/tags/opennlp-meetup/index.html b/tags/opennlp-meetup/index.html new file mode 100644 index 00000000..321a1ca0 --- /dev/null +++ b/tags/opennlp-meetup/index.html @@ -0,0 +1,232 @@ + + + + + + OpenNLP Meetup · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + OpenNLP Meetup + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/opennlp-meetup/index.xml b/tags/opennlp-meetup/index.xml new file mode 100644 index 00000000..6c1f2f0f --- /dev/null +++ b/tags/opennlp-meetup/index.xml @@ -0,0 +1,24 @@ + + + + OpenNLP Meetup on Sara Zan + https://www.zansara.dev/tags/opennlp-meetup/ + Recent content in OpenNLP Meetup on Sara Zan + Hugo -- gohugo.io + en + Thu, 01 Dec 2022 00:00:00 +0000 + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/opennlp-meetup/page/1/index.html b/tags/opennlp-meetup/page/1/index.html new file mode 100644 index 00000000..76de0214 --- /dev/null +++ b/tags/opennlp-meetup/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/opennlp-meetup/ + + + + + + diff --git a/tags/php-8/index.html b/tags/php-8/index.html new file mode 100644 index 00000000..ed137a81 --- /dev/null +++ b/tags/php-8/index.html @@ -0,0 +1,237 @@ + + + + + + PHP 8 · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + PHP 8 + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/php-8/index.xml b/tags/php-8/index.xml new file mode 100644 index 00000000..3b718c19 --- /dev/null +++ b/tags/php-8/index.xml @@ -0,0 +1,21 @@ + + + + PHP 8 on Sara Zan + https://www.zansara.dev/tags/php-8/ + Recent content in PHP 8 on Sara Zan + Hugo -- gohugo.io + en + Fri, 01 Jan 2016 00:00:00 +0000 + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/tags/php-8/page/1/index.html b/tags/php-8/page/1/index.html new file mode 100644 index 00000000..063ecfc7 --- /dev/null +++ b/tags/php-8/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/php-8/ + + + + + + diff --git a/tags/php/index.html b/tags/php/index.html new file mode 100644 index 00000000..eb348e64 --- /dev/null +++ b/tags/php/index.html @@ -0,0 +1,237 @@ + + + + + + PHP · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + PHP + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/php/index.xml b/tags/php/index.xml new file mode 100644 index 00000000..910eb578 --- /dev/null +++ b/tags/php/index.xml @@ -0,0 +1,21 @@ + + + + PHP on Sara Zan + https://www.zansara.dev/tags/php/ + Recent content in PHP on Sara Zan + Hugo -- gohugo.io + en + Fri, 01 Jan 2016 00:00:00 +0000 + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. + + + diff --git a/tags/php/page/1/index.html b/tags/php/page/1/index.html new file mode 100644 index 00000000..1fccd8da --- /dev/null +++ b/tags/php/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/php/ + + + + + + diff --git a/tags/physics/index.html b/tags/physics/index.html new file mode 100644 index 00000000..37bd6fb8 --- /dev/null +++ b/tags/physics/index.html @@ -0,0 +1,244 @@ + + + + + + Physics · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + + + +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/physics/index.xml b/tags/physics/index.xml new file mode 100644 index 00000000..f1547ee0 --- /dev/null +++ b/tags/physics/index.xml @@ -0,0 +1,42 @@ + + + + Physics on Sara Zan + https://www.zansara.dev/tags/physics/ + Recent content in Physics on Sara Zan + Hugo -- gohugo.io + en + Tue, 01 Mar 2022 00:00:00 +0000 + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF) + https://www.zansara.dev/publications/thpv042/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/publications/thpv042/ + Abstract Link to heading The CERN accelerators require a large number of instruments, measuring different beam parameters like position, losses, current etc. The instruments’ associated electronics and software also produce information about their status. All these data are stored in a database for later analysis. The Beam Instrumentation group developed the Offline Analysis Framework some years ago to regularly and systematically analyze these data. The framework has been successfully used for nearly 100 different analyses that ran regularly by the end of the LHC run 2. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. 
+ + + diff --git a/tags/physics/page/1/index.html b/tags/physics/page/1/index.html new file mode 100644 index 00000000..f6daacd1 --- /dev/null +++ b/tags/physics/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/physics/ + + + + + + diff --git a/tags/pipeline/index.html b/tags/pipeline/index.html new file mode 100644 index 00000000..82db0578 --- /dev/null +++ b/tags/pipeline/index.html @@ -0,0 +1,236 @@ + + + + + + Pipeline · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Pipeline + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/pipeline/index.xml b/tags/pipeline/index.xml new file mode 100644 index 00000000..826fef0a --- /dev/null +++ b/tags/pipeline/index.xml @@ -0,0 +1,28 @@ + + + + Pipeline on Sara Zan + https://www.zansara.dev/tags/pipeline/ + Recent content in Pipeline on Sara Zan + Hugo -- gohugo.io + en + Thu, 26 Oct 2023 00:00:00 +0000 + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. + + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + diff --git a/tags/pipeline/page/1/index.html b/tags/pipeline/page/1/index.html new file mode 100644 index 00000000..b0f15220 --- /dev/null +++ b/tags/pipeline/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/pipeline/ + + + + + + diff --git a/tags/pyqt/index.html b/tags/pyqt/index.html new file mode 100644 index 00000000..be52dd9c --- /dev/null +++ b/tags/pyqt/index.html @@ -0,0 +1,240 @@ + + + + + + PyQt · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + + + +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/pyqt/index.xml b/tags/pyqt/index.xml new file mode 100644 index 00000000..aa74c172 --- /dev/null +++ b/tags/pyqt/index.xml @@ -0,0 +1,35 @@ + + + + PyQt on Sara Zan + https://www.zansara.dev/tags/pyqt/ + Recent content in PyQt on Sara Zan + Hugo -- gohugo.io + en + Tue, 01 Mar 2022 00:00:00 +0000 + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. + + + diff --git a/tags/pyqt/page/1/index.html b/tags/pyqt/page/1/index.html new file mode 100644 index 00000000..e46f870d --- /dev/null +++ b/tags/pyqt/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/pyqt/ + + + + + + diff --git a/tags/python/index.html b/tags/python/index.html new file mode 100644 index 00000000..64dbcd04 --- /dev/null +++ b/tags/python/index.html @@ -0,0 +1,301 @@ + + + + + + Python · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Python + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/python/index.xml b/tags/python/index.xml new file mode 100644 index 00000000..d5ddddcb --- /dev/null +++ b/tags/python/index.xml @@ -0,0 +1,164 @@ + + + + Python on Sara Zan + https://www.zansara.dev/tags/python/ + Recent content in Python on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Canals: a new concept of Pipeline + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + Thu, 26 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-26-haystack-series-canals/ + As we have seen in the previous episode of this series, Haystack&rsquo;s Pipeline is a powerful concept that comes with its set of benefits and shortcomings. In Haystack 2.0, the pipeline was one of the first items that we focused our attention on, and it was the starting point of the entire rewrite. +What does this mean in practice? Let&rsquo;s look at what Haystack Pipelines in 2.0 will be like, how they differ from their 1. 
+ + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Why rewriting Haystack?! + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Wed, 11 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-11-haystack-series-why/ + Before even diving into what Haystack 2.0 is, how it was built, and how it works, let&rsquo;s spend a few words about the whats and the whys. +First of all, what is Haystack? +And next, why on Earth did we decide to rewrite it from the ground up? +A Pioneer Framework Link to heading Haystack is a relatively young framework, its initial release dating back to November 28th, 2019. Back then, Natural Language Processing was a field that had just started moving its first steps outside of research labs, and Haystack was one of the first libraries that promised enterprise-grade, production-ready NLP features. + + + Haystack 2.0: What is it? + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + Tue, 10 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-10-haystack-series-intro/ + December is finally approaching, and with it the release of Haystack 2.0. At deepset, we’ve been talking about it for months, we’ve been iterating on the core concepts for what feels like a million times, and it looks like we’re finally getting ready for the approaching deadline. +But what is it that makes this release so special? +In short, Haystack 2.0 is a complete rewrite. A huge, big-bang style change. Almost no code survived the migration unmodified: we’ve been across the entire 100,000+ lines of the codebase and redone everything in under a year. + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verb conjugations in different languages. 
My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly, there seems to be a shortage of good alternatives in this field. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise about the presence of a preview module in the latest Haystack 1. + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI) group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Evolution of the CERN Beam Instrumentation Offline Analysis Framework (OAF) + https://www.zansara.dev/publications/thpv042/ + Sat, 11 Dec 2021 00:00:00 +0000 + https://www.zansara.dev/publications/thpv042/ + Abstract Link to heading The CERN accelerators require a large number of instruments, measuring different beam parameters like position, losses, current etc. The instruments’ associated electronics and software also produce information about their status. All these data are stored in a database for later analysis. The Beam Instrumentation group developed the Offline Analysis Framework some years ago to regularly and systematically analyze these data. The framework has been successfully used for nearly 100 different analyses that ran regularly by the end of the LHC run 2. 
+ + + ZanzoCam: An open-source alpine web camera + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Slides: ZanzoCam: An open-source alpine web camera +On May 24th 2021 I held a talk about the ZanzoCam project as invited speaker for the &ldquo;Hardware and Software Codesign&rdquo; course at Università di Pavia. +The slides go through the entire lifecycle of the ZanzoCam project, from the very inception of it, the market research, our decision process, earlier prototypes, and then goes into a more detailed explanation of the the design and implementation of the project from a hardware and software perspective, with some notes about our financial situation and project management. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + ZanzoCam + https://www.zansara.dev/projects/zanzocam/ + Wed, 01 Jan 2020 00:00:00 +0000 + https://www.zansara.dev/projects/zanzocam/ + Main website: https://zanzocam.github.io/ +ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices on their affiliate huts. +ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. + + + diff --git a/tags/python/page/1/index.html b/tags/python/page/1/index.html new file mode 100644 index 00000000..632d97ae --- /dev/null +++ b/tags/python/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/python/ + + + + + + diff --git a/tags/qt/index.html b/tags/qt/index.html new file mode 100644 index 00000000..20a1ddef --- /dev/null +++ b/tags/qt/index.html @@ -0,0 +1,240 @@ + + + + + + Qt · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + + + +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/qt/index.xml b/tags/qt/index.xml new file mode 100644 index 00000000..1aaeefe3 --- /dev/null +++ b/tags/qt/index.xml @@ -0,0 +1,35 @@ + + + + Qt on Sara Zan + https://www.zansara.dev/tags/qt/ + Recent content in Qt on Sara Zan + Hugo -- gohugo.io + en + Tue, 01 Mar 2022 00:00:00 +0000 + + + Adopting PyQt For Beam Instrumentation GUI Development At CERN + https://www.zansara.dev/publications/thpv014/ + Tue, 01 Mar 2022 00:00:00 +0000 + https://www.zansara.dev/publications/thpv014/ + Abstract Link to heading As Java GUI toolkits become deprecated, the Beam Instrumentation (BI)group at CERN has investigated alternatives and selected PyQt as one of the suitable technologies for future GUIs, in accordance with the paper presented at ICALEPCS19. This paper presents tools created, or adapted, to seamlessly integrate future PyQt GUI development alongside current Java oriented workflows and the controls environment. This includes (a) creating a project template and a GUI management tool to ease and standardize our development process, (b) rewriting our previously Java-centric Expert GUI Launcher to be language-agnostic and (c) porting a selection of operational GUIs from Java to PyQt, to test the feasibility of the development process and identify bottlenecks. + + + Our Journey From Java to PyQt and Web For CERN Accelerator Control GUIs + https://www.zansara.dev/publications/tucpr03/ + Sun, 30 Aug 2020 00:00:00 +0000 + https://www.zansara.dev/publications/tucpr03/ + Abstract Link to heading For more than 15 years, operational GUIs for accelerator controls and some lab applications for equipment experts have been developed in Java, first with Swing and more recently with JavaFX. In March 2018, Oracle announced that Java GUIs were not part of their strategy anymore*. They will not ship JavaFX after Java 8 and there are hints that they would like to get rid of Swing as well. + + + Evaluation of Qt as GUI Framework for Accelerator Controls + https://www.zansara.dev/publications/msc-thesis/ + Thu, 20 Dec 2018 00:00:00 +0000 + https://www.zansara.dev/publications/msc-thesis/ + This is the full-text of my MSc thesis, written in collaboration with Politecnico di Milano and CERN. +Get the full text here: Evaluation of Qt as GUI Framework for Accelerator Controls +Publisher&rsquo;s entry: 10589/144860. + + + diff --git a/tags/qt/page/1/index.html b/tags/qt/page/1/index.html new file mode 100644 index 00000000..34436195 --- /dev/null +++ b/tags/qt/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/qt/ + + + + + + diff --git a/tags/rag/index.html b/tags/rag/index.html new file mode 100644 index 00000000..7712f3b3 --- /dev/null +++ b/tags/rag/index.html @@ -0,0 +1,244 @@ + + + + + + RAG · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + RAG + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/rag/index.xml b/tags/rag/index.xml new file mode 100644 index 00000000..89c4cad5 --- /dev/null +++ b/tags/rag/index.xml @@ -0,0 +1,48 @@ + + + + RAG on Sara Zan + https://www.zansara.dev/tags/rag/ + Recent content in RAG on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to this date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. 
+ + + diff --git a/tags/rag/page/1/index.html b/tags/rag/page/1/index.html new file mode 100644 index 00000000..85bc5c85 --- /dev/null +++ b/tags/rag/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/rag/ + + + + + + diff --git a/tags/raspberry-pi/index.html b/tags/raspberry-pi/index.html new file mode 100644 index 00000000..eaafb615 --- /dev/null +++ b/tags/raspberry-pi/index.html @@ -0,0 +1,241 @@ + + + + + + Raspberry Pi · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Raspberry Pi + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/raspberry-pi/index.xml b/tags/raspberry-pi/index.xml new file mode 100644 index 00000000..1fa83fc4 --- /dev/null +++ b/tags/raspberry-pi/index.xml @@ -0,0 +1,30 @@ + + + + Raspberry Pi on Sara Zan + https://www.zansara.dev/tags/raspberry-pi/ + Recent content in Raspberry Pi on Sara Zan + Hugo -- gohugo.io + en + Mon, 24 May 2021 00:00:00 +0000 + + + ZanzoCam: An open-source alpine web camera + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Slides: ZanzoCam: An open-source alpine web camera +On May 24th 2021 I held a talk about the ZanzoCam project as invited speaker for the &ldquo;Hardware and Software Codesign&rdquo; course at Università di Pavia. +The slides go through the entire lifecycle of the ZanzoCam project, from the very inception of it, the market research, our decision process, earlier prototypes, and then goes into a more detailed explanation of the the design and implementation of the project from a hardware and software perspective, with some notes about our financial situation and project management. + + + ZanzoCam + https://www.zansara.dev/projects/zanzocam/ + Wed, 01 Jan 2020 00:00:00 +0000 + https://www.zansara.dev/projects/zanzocam/ + Main website: https://zanzocam.github.io/ +ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices on their affiliate huts. +ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. + + + diff --git a/tags/raspberry-pi/page/1/index.html b/tags/raspberry-pi/page/1/index.html new file mode 100644 index 00000000..28646305 --- /dev/null +++ b/tags/raspberry-pi/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/raspberry-pi/ + + + + + + diff --git a/tags/retrieval-augmentation/index.html b/tags/retrieval-augmentation/index.html new file mode 100644 index 00000000..4f77d443 --- /dev/null +++ b/tags/retrieval-augmentation/index.html @@ -0,0 +1,240 @@ + + + + + + Retrieval Augmentation · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Retrieval Augmentation + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/retrieval-augmentation/index.xml b/tags/retrieval-augmentation/index.xml new file mode 100644 index 00000000..8e04a8e6 --- /dev/null +++ b/tags/retrieval-augmentation/index.xml @@ -0,0 +1,37 @@ + + + + Retrieval Augmentation on Sara Zan + https://www.zansara.dev/tags/retrieval-augmentation/ + Recent content in Retrieval Augmentation on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + Indexing data for RAG applications + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + Sun, 05 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-05-haystack-series-minimal-indexing/ + In the previous post of the Haystack 2.0 series, we saw how to build RAG pipelines using a generator, a prompt builder, and a retriever with its document store. However, the content of our document store wasn&rsquo;t extensive, and populating one with clean, properly formatted data is not an easy task. How can we approach this problem? +In this post, I will show you how to use Haystack 2.0 to create large amounts of documents from a few web pages and write them a document store that you can then use for retrieval. + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + diff --git a/tags/retrieval-augmentation/page/1/index.html b/tags/retrieval-augmentation/page/1/index.html new file mode 100644 index 00000000..d6236d5f --- /dev/null +++ b/tags/retrieval-augmentation/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/retrieval-augmentation/ + + + + + + diff --git a/tags/retrieval/index.html b/tags/retrieval/index.html new file mode 100644 index 00000000..67c8b9df --- /dev/null +++ b/tags/retrieval/index.html @@ -0,0 +1,232 @@ + + + + + + Retrieval · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Retrieval + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/retrieval/index.xml b/tags/retrieval/index.xml new file mode 100644 index 00000000..bb7beec6 --- /dev/null +++ b/tags/retrieval/index.xml @@ -0,0 +1,24 @@ + + + + Retrieval on Sara Zan + https://www.zansara.dev/tags/retrieval/ + Recent content in Retrieval on Sara Zan + Hugo -- gohugo.io + en + Thu, 01 Dec 2022 00:00:00 +0000 + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/retrieval/page/1/index.html b/tags/retrieval/page/1/index.html new file mode 100644 index 00000000..5d0626d6 --- /dev/null +++ b/tags/retrieval/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/retrieval/ + + + + + + diff --git a/tags/sdk/index.html b/tags/sdk/index.html new file mode 100644 index 00000000..8802243a --- /dev/null +++ b/tags/sdk/index.html @@ -0,0 +1,232 @@ + + + + + + SDK · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + SDK + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/sdk/index.xml b/tags/sdk/index.xml new file mode 100644 index 00000000..bbcde5bc --- /dev/null +++ b/tags/sdk/index.xml @@ -0,0 +1,23 @@ + + + + SDK on Sara Zan + https://www.zansara.dev/tags/sdk/ + Recent content in SDK on Sara Zan + Hugo -- gohugo.io + en + Sun, 10 Sep 2023 00:00:00 +0000 + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + diff --git a/tags/sdk/page/1/index.html b/tags/sdk/page/1/index.html new file mode 100644 index 00000000..b917151a --- /dev/null +++ b/tags/sdk/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/sdk/ + + + + + + diff --git a/tags/semantic-search/index.html b/tags/semantic-search/index.html new file mode 100644 index 00000000..70e86d51 --- /dev/null +++ b/tags/semantic-search/index.html @@ -0,0 +1,236 @@ + + + + + + Semantic Search · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Semantic Search + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/semantic-search/index.xml b/tags/semantic-search/index.xml new file mode 100644 index 00000000..0ba6af3f --- /dev/null +++ b/tags/semantic-search/index.xml @@ -0,0 +1,29 @@ + + + + Semantic Search on Sara Zan + https://www.zansara.dev/tags/semantic-search/ + Recent content in Semantic Search on Sara Zan + Hugo -- gohugo.io + en + Fri, 27 Oct 2023 00:00:00 +0000 + + + RAG Pipelines from scratch + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Fri, 27 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-27-haystack-series-rag/ + Last updated: 21/11/2023 +Retrieval Augmented Generation (RAG) is quickly becoming an essential technique to make LLMs more reliable and effective at answering any question, regardless of how specific. To stay relevant in today&rsquo;s NLP landscape, Haystack must enable it. +Let&rsquo;s see how to build such applications with Haystack 2.0, from a direct call to an LLM to a fully-fledged, production-ready RAG pipeline that scales. At the end of this post, we will have an application that can answer questions about world countries based on data stored in a private database. + + + Haystack's Pipeline - A Deep Dive + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + Sun, 15 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-10-15-haystack-series-pipeline/ + If you&rsquo;ve ever looked at Haystack before, you must have come across the Pipeline, one of the most prominent concepts of the framework. However, this abstraction is by no means an obvious choice when it comes to NLP libraries. Why did we adopt this concept, and what does it bring us? +In this post, I go into all the details of how the Pipeline abstraction works in Haystack now, why it works this way, and its strengths and weaknesses. + + + diff --git a/tags/semantic-search/page/1/index.html b/tags/semantic-search/page/1/index.html new file mode 100644 index 00000000..3e769872 --- /dev/null +++ b/tags/semantic-search/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/semantic-search/ + + + + + + diff --git a/tags/text-to-image/index.html b/tags/text-to-image/index.html new file mode 100644 index 00000000..ca280d09 --- /dev/null +++ b/tags/text-to-image/index.html @@ -0,0 +1,232 @@ + + + + + + Text to Image · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Text to Image + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/text-to-image/index.xml b/tags/text-to-image/index.xml new file mode 100644 index 00000000..4d43c7f7 --- /dev/null +++ b/tags/text-to-image/index.xml @@ -0,0 +1,24 @@ + + + + Text to Image on Sara Zan + https://www.zansara.dev/tags/text-to-image/ + Recent content in Text to Image on Sara Zan + Hugo -- gohugo.io + en + Thu, 01 Dec 2022 00:00:00 +0000 + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + diff --git a/tags/text-to-image/page/1/index.html b/tags/text-to-image/page/1/index.html new file mode 100644 index 00000000..420d9268 --- /dev/null +++ b/tags/text-to-image/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/text-to-image/ + + + + + + diff --git a/tags/verbix/index.html b/tags/verbix/index.html new file mode 100644 index 00000000..ee9d64dd --- /dev/null +++ b/tags/verbix/index.html @@ -0,0 +1,232 @@ + + + + + + Verbix · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Verbix + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/verbix/index.xml b/tags/verbix/index.xml new file mode 100644 index 00000000..6dfe67b7 --- /dev/null +++ b/tags/verbix/index.xml @@ -0,0 +1,23 @@ + + + + Verbix on Sara Zan + https://www.zansara.dev/tags/verbix/ + Recent content in Verbix on Sara Zan + Hugo -- gohugo.io + en + Sun, 10 Sep 2023 00:00:00 +0000 + + + An (unofficial) Python SDK for Verbix + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + Sun, 10 Sep 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-09-10-python-verbix-sdk/ + PyPI package: https://pypi.org/project/verbix-sdk/ +GitHub Repo: https://github.com/ZanSara/verbix-sdk +Minimal Docs: https://github.com/ZanSara/verbix-sdk/blob/main/README.md +As part of a larger side project which is still in the works, these days I found myself looking for some decent API for verbs conjugations in different languages. My requirements were &ldquo;simple&rdquo;: +Supports many languages, including Italian, Portuguese and Hungarian Conjugates irregulars properly Offers an API access to the conjugation tables Refuses to conjugate anything except for known verbs (Optional) Highlights the irregularities in some way Surprisingly these seem to be a shortage of good alternatives in this field. + + + diff --git a/tags/verbix/page/1/index.html b/tags/verbix/page/1/index.html new file mode 100644 index 00000000..2f8b7620 --- /dev/null +++ b/tags/verbix/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/verbix/ + + + + + + diff --git a/tags/web/index.html b/tags/web/index.html new file mode 100644 index 00000000..d53fd526 --- /dev/null +++ b/tags/web/index.html @@ -0,0 +1,254 @@ + + + + + + Web · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Web + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/web/index.xml b/tags/web/index.xml new file mode 100644 index 00000000..85449bdd --- /dev/null +++ b/tags/web/index.xml @@ -0,0 +1,47 @@ + + + + Web on Sara Zan + https://www.zansara.dev/tags/web/ + Recent content in Web on Sara Zan + Hugo -- gohugo.io + en + Thu, 09 Nov 2023 00:00:00 +0000 + + + The World of Web RAG + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + Thu, 09 Nov 2023 00:00:00 +0000 + https://www.zansara.dev/posts/2023-11-09-haystack-series-simple-web-rag/ + In an earlier post of the Haystack 2.0 series, we&rsquo;ve seen how to build RAG and indexing pipelines. An application that uses these two pipelines is practical if you have an extensive, private collection of documents and need to perform RAG on such data only. However, in many cases, you may want to get data from the Internet: from news outlets, documentation pages, and so on. +In this post, we will see how to build a Web RAG application: a RAG pipeline that can search the Web for the information needed to answer your questions. + + + ZanzoCam: An open-source alpine web camera + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Slides: ZanzoCam: An open-source alpine web camera +On May 24th 2021 I held a talk about the ZanzoCam project as invited speaker for the &ldquo;Hardware and Software Codesign&rdquo; course at Università di Pavia. +The slides go through the entire lifecycle of the ZanzoCam project, from the very inception of it, the market research, our decision process, earlier prototypes, and then goes into a more detailed explanation of the the design and implementation of the project from a hardware and software perspective, with some notes about our financial situation and project management. + + + ZanzoCam + https://www.zansara.dev/projects/zanzocam/ + Wed, 01 Jan 2020 00:00:00 +0000 + https://www.zansara.dev/projects/zanzocam/ + Main website: https://zanzocam.github.io/ +ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices on their affiliate huts. +ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. + + + CAI Sovico's Website + https://www.zansara.dev/projects/booking-system/ + Fri, 01 Jan 2016 00:00:00 +0000 + https://www.zansara.dev/projects/booking-system/ + Main website: https://www.caisovico.it +Since my bachelor studies I have maintained the IT infrastructure of an alpine hut, Rifugio M. Del Grande - R. Camerini. I count this as my first important project, one that people, mostly older and not very tech savvy, depended on to run a real business. +The website went through several iterations as web technologies evolved, and well as the type of servers we could afford. Right now it features minimal HTML/CSS static pages, plus a reservations system written on a PHP 8 / MySQL backend with a vanilla JS frontend. 
+ + + diff --git a/tags/web/page/1/index.html b/tags/web/page/1/index.html new file mode 100644 index 00000000..5599bb8f --- /dev/null +++ b/tags/web/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/web/ + + + + + + diff --git a/tags/zanzocam/index.html b/tags/zanzocam/index.html new file mode 100644 index 00000000..f0df19f0 --- /dev/null +++ b/tags/zanzocam/index.html @@ -0,0 +1,241 @@ + + + + + + ZanzoCam · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + ZanzoCam + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/zanzocam/index.xml b/tags/zanzocam/index.xml new file mode 100644 index 00000000..3afc51a2 --- /dev/null +++ b/tags/zanzocam/index.xml @@ -0,0 +1,30 @@ + + + + ZanzoCam on Sara Zan + https://www.zansara.dev/tags/zanzocam/ + Recent content in ZanzoCam on Sara Zan + Hugo -- gohugo.io + en + Mon, 24 May 2021 00:00:00 +0000 + + + ZanzoCam: An open-source alpine web camera + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Slides: ZanzoCam: An open-source alpine web camera +On May 24th 2021 I held a talk about the ZanzoCam project as invited speaker for the &ldquo;Hardware and Software Codesign&rdquo; course at Università di Pavia. +The slides go through the entire lifecycle of the ZanzoCam project, from the very inception of it, the market research, our decision process, earlier prototypes, and then goes into a more detailed explanation of the the design and implementation of the project from a hardware and software perspective, with some notes about our financial situation and project management. + + + ZanzoCam + https://www.zansara.dev/projects/zanzocam/ + Wed, 01 Jan 2020 00:00:00 +0000 + https://www.zansara.dev/projects/zanzocam/ + Main website: https://zanzocam.github.io/ +ZanzoCam is a low-power, low-frequency camera based on Raspberry Pi, designed to operate autonomously in remote locations and under harsh conditions. It was designed and developed between 2019 and 2021 for CAI Lombardia by a team of two people, with me as the software developer and the other responsible for the hardware design. CAI later deployed several of these devices on their affiliate huts. +ZanzoCams are designed to work reliably in the harsh conditions of alpine winters, be as power-efficient as possible, and tolerate unstable network connections: they feature a robust HTTP- or FTP-based picture upload strategy which is remotely configurable from a very simple, single-file web panel. + + + diff --git a/tags/zanzocam/page/1/index.html b/tags/zanzocam/page/1/index.html new file mode 100644 index 00000000..767d1a1c --- /dev/null +++ b/tags/zanzocam/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/tags/zanzocam/ + + + + + + diff --git a/static/talks/2021-05-24-zanzocam-pavia.pdf b/talks/2021-05-24-zanzocam-pavia.pdf similarity index 100% rename from static/talks/2021-05-24-zanzocam-pavia.pdf rename to talks/2021-05-24-zanzocam-pavia.pdf diff --git a/static/talks/2021-05-24-zanzocam-pavia.png b/talks/2021-05-24-zanzocam-pavia.png similarity index 100% rename from static/talks/2021-05-24-zanzocam-pavia.png rename to talks/2021-05-24-zanzocam-pavia.png diff --git a/talks/2021-05-24-zanzocam-pavia/index.html b/talks/2021-05-24-zanzocam-pavia/index.html new file mode 100644 index 00000000..ec4179a3 --- /dev/null +++ b/talks/2021-05-24-zanzocam-pavia/index.html @@ -0,0 +1,304 @@ + + + + + + ZanzoCam: An open-source alpine web camera · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + ZanzoCam: An open-source alpine web camera + +

+
+ +
+ +
+ + Featured image + +

Slides: ZanzoCam: An open-source alpine web camera

+
+

On May 24th 2021 I held a talk about the ZanzoCam project +as an invited speaker for the “Hardware and Software Codesign” course at +Università di Pavia.

+

The slides go through the entire lifecycle of the ZanzoCam project, +from its very inception, the market research, our decision process, and earlier prototypes, and +then go into a more detailed explanation of the design and implementation of the project from +a hardware and software perspective, with some notes about our financial situation and project management.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/talks/2022-12-01-open-nlp-meetup.png b/talks/2022-12-01-open-nlp-meetup.png similarity index 100% rename from static/talks/2022-12-01-open-nlp-meetup.png rename to talks/2022-12-01-open-nlp-meetup.png diff --git a/talks/2022-12-01-open-nlp-meetup/index.html b/talks/2022-12-01-open-nlp-meetup/index.html new file mode 100644 index 00000000..f1ac6d17 --- /dev/null +++ b/talks/2022-12-01-open-nlp-meetup/index.html @@ -0,0 +1,350 @@ + + + + + + A Practical Introduction to Image Retrieval · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + A Practical Introduction to Image Retrieval + +

+
+ +
+ +
+ + Featured image + +

Youtube: Open NLP meetup #7

+

Slides: A Practical Introduction to Image Retrieval

+

Colab: MultiModalRetriever - Live coding

+

All the material can also be found here.

+
+

+ A Practical Introduction to Image Retrieval + + + Link to heading + +

+

by Sara Zanzottera from deepset

+

Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. Text-to-text generation models like GPT now have their counterparts in text-to-image models, like Stable Diffusion. But what about search? In this talk we’re going to experiment with CLIP, a text-to-image search model, to look for animals matching specific characteristics in a dataset of pictures. Does CLIP know which one is “The fastest animal in the world”?
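To make the text-to-image idea more concrete, the snippet below is a minimal sketch of CLIP-based image search using the sentence-transformers CLIP checkpoint directly (not the Haystack MultiModalRetriever shown in the talk); the model name, image folder, and query are illustrative assumptions.

```python
# Minimal text-to-image search sketch with CLIP (illustrative; not the talk's exact code).
from pathlib import Path

from PIL import Image
from sentence_transformers import SentenceTransformer, util

# Assumption: a local folder of animal pictures; any folder of images works.
image_paths = sorted(Path("animals").glob("*.jpg"))

model = SentenceTransformer("clip-ViT-B-32")  # CLIP checkpoint from the Hugging Face Hub

# Embed the images and the text query into the same vector space.
image_embeddings = model.encode([Image.open(p) for p in image_paths], convert_to_tensor=True)
query_embedding = model.encode("The fastest animal in the world", convert_to_tensor=True)

# Rank the images by cosine similarity to the query.
scores = util.cos_sim(query_embedding, image_embeddings)[0]
best = int(scores.argmax())
print(f"Best match: {image_paths[best]} (score={float(scores[best]):.3f})")
```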

+
+

For the 7th OpenNLP meetup I presented the topic of Image Retrieval, a feature that I’ve recently added to Haystack in the form of a MultiModal Retriever (see the Tutorial).

+

The talk consists of 5 parts:

+
    +
  • An introduction to the topic of Image Retrieval
  • +
  • A mention of the current SOTA model (CLIP)
  • +
  • An overview of Haystack
  • +
  • A step-by-step description of how image retrieval applications can be implemented with Haystack
  • +
  • A live coding session where I start from a blank Colab notebook and build a fully working image retrieval system from the ground up, to the point where I can run queries live.
  • +
+

Towards the end I briefly mention an even more advanced version of this image retrieval system, which I had no time to implement live. However, I later built a notebook implementing such a system, and you can find it here: Cheetah.ipynb

+

The slides were generated from the linked Jupyter notebook with jupyter nbconvert Dec_1st_OpenNLP_Meetup.ipynb --to slides --post serve.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/talks/2023-08-03-office-hours-haystack-2.0-status.png b/talks/2023-08-03-office-hours-haystack-2.0-status.png similarity index 100% rename from static/talks/2023-08-03-office-hours-haystack-2.0-status.png rename to talks/2023-08-03-office-hours-haystack-2.0-status.png diff --git a/talks/2023-08-03-office-hours-haystack-2.0-status/index.html b/talks/2023-08-03-office-hours-haystack-2.0-status/index.html new file mode 100644 index 00000000..4eefdd91 --- /dev/null +++ b/talks/2023-08-03-office-hours-haystack-2.0-status/index.html @@ -0,0 +1,309 @@ + + + + + + Office Hours: Haystack 2.0 · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Office Hours: Haystack 2.0 + +

+
+ +
+ +
+ + Featured image + +

Recording: Haystack v2 - Office Hours

+

Slides: Haystack v2 - Status Update

+

All the material can also be found here.

+
+

In this Office Hours I presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise about the presence of a preview module in the latest Haystack 1.x releases, we took the opportunity to share this early draft of the project to collect early feedback.

+

Haystack 2.0 is a total rewrite that rethinks many of the core concepts of the framework and makes LLM support its primary concern, while making sure to support all the use cases its predecessor enabled. The rewrite addresses some well-known, old issues with the pipeline’s design and with the relationship between the pipeline, its components, and the document stores, and it aims at drastically improving the developer experience and the framework’s extensibility.
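As a rough illustration of the extensibility angle, this is approximately what a custom component looks like in Haystack 2.x. Note that at the time of this talk the same API still lived in the `haystack.preview` package, so the import path below is an assumption based on the later stable release.

```python
# Sketch of a custom Haystack 2.x component (assumes the stable 2.x import path;
# during the preview phase discussed here it was available under haystack.preview).
from haystack import component


@component
class Uppercaser:
    """Toy component: any class with typed outputs and a run() method can join a Pipeline."""

    @component.output_types(uppercased=str)
    def run(self, text: str):
        return {"uppercased": text.upper()}
```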

+

As the main designer of this rewrite, I walked the community through a slightly re-hashed version of the slide deck I had presented internally just a few days earlier in an All Hands on the same topic.

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/talks/2023-10-12-office-hours-rag-pipelines.png b/talks/2023-10-12-office-hours-rag-pipelines.png similarity index 100% rename from static/talks/2023-10-12-office-hours-rag-pipelines.png rename to talks/2023-10-12-office-hours-rag-pipelines.png diff --git a/talks/2023-10-12-office-hours-rag-pipelines/index.html b/talks/2023-10-12-office-hours-rag-pipelines/index.html new file mode 100644 index 00000000..84e4adc8 --- /dev/null +++ b/talks/2023-10-12-office-hours-rag-pipelines/index.html @@ -0,0 +1,316 @@ + + + + + + Office Hours: RAG Pipelines · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+
+
+

+ + Office Hours: RAG Pipelines + +

+
+ +
+ +
+ + Featured image + +

Recording: Office Hours - RAG Pipelines

+

Notebook: RAG_Pipelines.ipynb

+

All the material can also be found here.

+
+

In this Office Hours I walk through the LLM support offered by Haystack 2.0 to date: the Generator, the PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications.

+

In under 40 minutes we go from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks, and feeds them to an LLM to ground its replies.

+

The talk also indirectly shows how Pipelines help users compose these systems quickly, visualize them, and connect the different parts together, thanks to the verbose error messages they produce.
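For reference, a minimal RAG pipeline along the lines discussed in the session might look like the sketch below. It assumes the stable Haystack 2.x import paths, an in-memory document store with a single toy document, and an OpenAI API key in the environment, so treat it as an outline rather than the notebook's exact code.

```python
# Minimal RAG pipeline sketch (assumes stable Haystack 2.x imports and OPENAI_API_KEY set).
from haystack import Document, Pipeline
from haystack.components.builders import PromptBuilder
from haystack.components.generators import OpenAIGenerator
from haystack.components.retrievers.in_memory import InMemoryBM25Retriever
from haystack.document_stores.in_memory import InMemoryDocumentStore

# A toy document store standing in for the web-retrieved, chunked documents.
store = InMemoryDocumentStore()
store.write_documents([Document(content="The Eiffel Tower is located in Paris, France.")])

template = """Answer the question using only the documents below.
{% for doc in documents %}{{ doc.content }}
{% endfor %}
Question: {{ question }}
Answer:"""

pipe = Pipeline()
pipe.add_component("retriever", InMemoryBM25Retriever(document_store=store))
pipe.add_component("prompt_builder", PromptBuilder(template=template))
pipe.add_component("llm", OpenAIGenerator())
pipe.connect("retriever.documents", "prompt_builder.documents")
pipe.connect("prompt_builder.prompt", "llm.prompt")

question = "Where is the Eiffel Tower?"
result = pipe.run({"retriever": {"query": question}, "prompt_builder": {"question": question}})
print(result["llm"]["replies"][0])
```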

+ +
+ + +
+ + + + + + + + +
+
+ + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/talks/index.html b/talks/index.html new file mode 100644 index 00000000..daa9a279 --- /dev/null +++ b/talks/index.html @@ -0,0 +1,244 @@ + + + + + + Talks · Sara Zan + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + +
+ + + +
+ +
+
+

+ + Talks + +

+
+ + + + + + + + + + +
+ +
+ +
+
+ © + + 2023 + Sara Zan + · + + Powered by Hugo & Coder. + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/talks/index.xml b/talks/index.xml new file mode 100644 index 00000000..69a6e3f9 --- /dev/null +++ b/talks/index.xml @@ -0,0 +1,54 @@ + + + + Talks on Sara Zan + https://www.zansara.dev/talks/ + Recent content in Talks on Sara Zan + Hugo -- gohugo.io + en + Thu, 12 Oct 2023 00:00:00 +0000 + + + Office Hours: RAG Pipelines + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Thu, 12 Oct 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-10-12-office-hours-rag-pipelines/ + Recording: Office Hours - RAG Pipelines +Notebook: RAG_Pipelines.ipynb +All the material can also be found here. +In this Office Hours I walk through the LLM support offered by Haystack 2.0 to this date: Generator, PromptBuilder, and how to connect them to different types of Retrievers to build Retrieval Augmented Generation (RAG) applications. +In under 40 minutes we start from a simple query to ChatGPT up to a full pipeline that retrieves documents from the Internet, splits them into chunks and feeds them to an LLM to ground its replies. + + + Office Hours: Haystack 2.0 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Thu, 03 Aug 2023 00:00:00 +0000 + https://www.zansara.dev/talks/2023-08-03-office-hours-haystack-2.0-status/ + Recording: Haystack v2 - Office Hours +Slides: Haystack v2 - Status Update +All the material can also be found here. +In this Office Hours I&rsquo;ve presented for the first time to our Discord community a preview of the upcoming 2.0 release of Haystack, which has been in the works since the start of the year. As rumors started to arise at the presence of a preview module in the latest Haystack 1. + + + A Practical Introduction to Image Retrieval + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Thu, 01 Dec 2022 00:00:00 +0000 + https://www.zansara.dev/talks/2022-12-01-open-nlp-meetup/ + Youtube: Open NLP meetup #7 +Slides: A Practical Introduction to Image Retrieval +Colab: MultiModalRetriever - Live coding +All the material can also be found here. +A Practical Introduction to Image Retrieval Link to heading by Sara Zanzottera from deepset +Search should not be limited to text only. Recently, Transformers-based NLP models started crossing the boundaries of text data and exploring the possibilities of other modalities, like tabular data, images, audio files, and more. + + + ZanzoCam: An open-source alpine web camera + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Mon, 24 May 2021 00:00:00 +0000 + https://www.zansara.dev/talks/2021-05-24-zanzocam-pavia/ + Slides: ZanzoCam: An open-source alpine web camera +On May 24th 2021 I held a talk about the ZanzoCam project as invited speaker for the &ldquo;Hardware and Software Codesign&rdquo; course at Università di Pavia. +The slides go through the entire lifecycle of the ZanzoCam project, from the very inception of it, the market research, our decision process, earlier prototypes, and then goes into a more detailed explanation of the the design and implementation of the project from a hardware and software perspective, with some notes about our financial situation and project management. 
+ + + diff --git a/talks/page/1/index.html b/talks/page/1/index.html new file mode 100644 index 00000000..94491636 --- /dev/null +++ b/talks/page/1/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/talks/ + + + + + + diff --git a/themes/hugo-coder/.gitignore b/themes/hugo-coder/.gitignore deleted file mode 100644 index 2be26dd2..00000000 --- a/themes/hugo-coder/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.idea -**/themes/ -exampleSite/public/ -exampleSite/resources/ -*.lock -public diff --git a/themes/hugo-coder/CONTRIBUTORS.md b/themes/hugo-coder/CONTRIBUTORS.md deleted file mode 100644 index 3b29b679..00000000 --- a/themes/hugo-coder/CONTRIBUTORS.md +++ /dev/null @@ -1,136 +0,0 @@ -# Contributors - -- [Chip Senkbeil](https://github.com/chipsenkbeil) -- [Dale Noe](https://github.com/dalenoe) -- [Gabor Nagy](https://github.com/Aigeruth) -- [Harry Khanna](https://github.com/hkhanna) -- [Ihor Dvoretskyi](https://github.com/idvoretskyi) -- [Jacob Wood](https://github.com/jacoblukewood) -- [Jan Baudisch](https://github.com/flyingP0tat0) -- [Jiri Hubacek](https://github.com/qeef) -- [Khosrow Moossavi](https://github.com/khos2ow) -- [Maikel](https://github.com/mbollemeijer) -- [MetBril](https://github.com/metbril) -- [Myles Johnson](https://github.com/MylesJohnson) -- [Niels Reijn](https://github.com/reijnn) -- [Padraic Renaghan](https://github.com/prenagha) -- [peterrus](https://github.com/peterrus) -- [Philipp Rintz](https://github.com/p-rintz) -- [Ralf Junghanns](https://github.com/rabbl) -- [rdhox](https://rdhox.io) -- [tobaloidee](https://github.com/Tobaloidee) -- [Tomasz Wąsiński](https://github.com/wasinski) -- [Vinícius dos Santos Oliveira](https://github.com/vinipsmaker) -- [Vlad Ionescu](https://github.com/Vlaaaaaaad) -- [Joseph Ting](https://github.com/josephting) -- [Abner Campanha](https://github.com/abnerpc) -- [Martin Kiesel](https://github.com/Kyslik) -- [John Tobin](https://www.johntobin.ie/) -- [Thomas Nys](https://thomasnys.com) -- [Piotr Januszewski](https://piojanu.github.io) -- [Artem Khvastunov](https://artspb.me) -- [Gabriel Nepomuceno](https://blog.nepomuceno.me) -- [Salvatore Giordano](https://salvatore-giordano.github.io) -- [Jeffrey Carpenter](https://uvolabs.me) -- [Paul Lettington](https://github.com/plett) -- [Thomas Vochten](https://github.com/thomasvochten) -- [Caspar Krieger](https://www.asparck.com) -- [D_DAndrew](https://d-dandrew.github.io) -- [Wataru Mizukami](https://github.com/tarumzu) -- [Yudi Widiyanto](https://github.com/yudiwdynto) -- [Łukasz Mróz](https://github.com/mrozlukasz) -- [Jia "Jay" Tan](https://github.com/j7an) -- [Ryan](https://github.com/alrayyes) -- [Naim A.](https://github.com/naim94a) -- [Alexander Rohde](https://github.com/a1x42) -- [Shreyansh Khajanchi](https://shreyanshja.in) -- [Lionel Brianto](https://lionel.brianto.dev) -- [Luis Zarate](https://github.com/jlzaratec) -- [Ariejan de Vroom](https://www.devroom.io) -- [Bobby Lindsey](https://bobbywlindsey.com) -- [José Mª Escartín](https://github.com/jme52) -- [John Schroeder](https://blog.schroedernet.software) -- [Tobias Lindberg](https://github.com/tobiasehlert) -- [KK](https://github.com/bebound) -- [Eli W. 
Hunter](https://github.com/elihunter173) -- [Víctor López](https://github.com/viticlick) -- [Anson VanDoren](https://github.com/anson-vandoren) -- [Michael Lynch](https://github.com/mtlynch) -- [FIGBERT](https://figbert.com/) -- [Yash Mehrotra](https://yashmehrotra.com) -- [Paolo Mainardi](https://paolomainardi.com) -- [Ka-Wai Lin](https://github.com/kwlin) -- [Piotr Orzechowski](https://orzechowski.tech) -- [Glenn Feunteun](https://github.com/gfeun) -- [Santiago González](https://github.com/netrules) -- [Codruț Constantin Gușoi](https://www.codrut.pro) -- [Clément Pannetier](https://clementpannetier.dev) -- [FantasticMao](https://github.com/FantasticMao) -- [Utkarsh Gupta](https://utkarsh2102.com) -- [Latiif Alsharif](https://latiif.se) -- [Endormi](https://endormi.io) -- [Rajiv Ranjan Singh](https://iamrajiv.github.io/) -- [Pakhomov Alexander](https://github.com/PakhomovAlexander) -- [Rhys Perry](https://rhysperry.com) -- [Arunvel Sriram](https://github.com/arunvelsriram) -- [Lorenzo Cameroni](https://github.com/came88) -- [Jared Sturdy](https://github.com/jsturdy) -- [Daniel Monteiro](https://github.com/dfamonteiro) -- [Dave Rolsky](https://github.com/autarch) -- [Joseph Sanders](https://github.com/jls83) -- [Rabin Adhikari](https://github.com/rabinadk1/) -- [Hussaini Zulkifli](https://github.com/hussaini/) -- [Ellison Leão](https://github.com/ellisonleao) -- [Lucas de Oliveira](https://github.com/lucas-dOliveira) -- [Jian Loong Liew](https://github.com/JianLoong) -- [earnest ma](https://github.com/earnestma) -- [TMineCola](https://github.com/tminecola) -- [Arafat Hasan](https://github.com/arafat-hasan) -- [YUJI](https://yuji.ne.jp/) -- [JaeSang Yoo](https://github.com/JSYoo5B) -- [tianheg](https://github.com/tianheg) -- [Felix](https://github.com/lazyyz) -- [Peter Duchnovsky](https://pduchnovsky.com) -- [Alex Miranda](https://ammiranda.com) -- [Alphonse Mariya](https://github.com/alfunx) -- [Ziwei Pan](https://github.com/PanZiwei/) -- [Viktar Patotski](https://github.com/xp-vit) -- [cuso4-5h2o](https://www.cuso4.me) -- [freeformz](https://icanhazdowntime.org) -- [Roberto Gongora](https://yourfavourite.blog) -- [kuba86](https://kuba86.com) -- [Vladislav Matus](https://github.com/matusvla) -- [Kirill Feoktistov](https://feoktistoff.org) -- [leins275](https://github.com/LanskovNV) -- [Michael Weiss](https://mweiss.ch) -- [Simon Pai](https://github.com/simonpai) -- [Brenton Mallen](https://github.com/brentonmallen1) -- [Xiaoyang Luo](https://github.com/ccviolett/) -- [Michiel Appelman](https://appelman.se) -- [Mark Wood](https://digitalnotions.net) -- [Sam A.](https://samsapti.dev) -- [John Feminella](https://jxf.me) -- [zzsqwq](https://zzsqwq.cn) -- [George Tsiokos](https://george.tsiokos.com) -- [Eltjo](https://github.com/eltjo) -- [Saurmandal](https://saur.neocities.org) -- [Jneo8](https://github.com/jneo8) -- [Daniel Nduati](https://github.com/DanNduati) -- [Simon Hollingshead](https://github.com/simonhollingshead) -- [yangyangdaji](https://github.com/yangyangdaji) -- [xiaotianxt](https://github.com/xiaotianxt) -- [Nour Agha](https://github.com/nourkagha) -- [Brian Lachniet](https://github.com/blachniet) -- [ShortArrow](https://github.com/ShortArrow) -- [Martin Hellspong](https://github.com/marhel) -- [Robert Tucker](https://github.com/robertwtucker) -- [Michał Pawlik](https://michalp.net) -- [Kilian Kluge](https://github.com/ionicsolutions) -- [Jaroslaw Rozanski](https://jarekrozanski.eu) -- [Easton Man](https://github.com/eastonman) -- [Yiğit Altınay](https://altinay.xyz) -- [Fei 
Kong](https://github.com/alpha0422) -- [Ahmet Enes Bayraktar](https://github.com/aeb-dev) -- [Todor Bogosavljević](https://github.com/tbx1b) -- [Kemal Akkoyun](https://github.com/kakkoyun) -- [Igetin](https://github.com/Igetin) diff --git a/themes/hugo-coder/LICENSE.md b/themes/hugo-coder/LICENSE.md deleted file mode 100644 index 29dbd757..00000000 --- a/themes/hugo-coder/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 Luiz F. A. de Prá - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/themes/hugo-coder/README.md b/themes/hugo-coder/README.md deleted file mode 100644 index 482a132f..00000000 --- a/themes/hugo-coder/README.md +++ /dev/null @@ -1,53 +0,0 @@ -

-

- - Hugo Theme Badge - - - MIT License Badge - -

- -

- - Hugo Coder Logo - -

-

- -A simple and clean blog theme for [Hugo](https://gohugo.io/). - -![](images/screenshot.png) - -## Live Demo - -See [here](https://hugo-coder.netlify.app/). - -## Quick Start - -1. Add the repository into your Hugo Project repository as a submodule, `git submodule add https://github.com/luizdepra/hugo-coder.git themes/hugo-coder`. -2. Configure your `config.toml`. You can either use [this minimal configuration](https://github.com/luizdepra/hugo-coder/blob/main/docs/configurations.md#complete-example) as a base, or look for a complete explanation about all configurations [here](https://github.com/luizdepra/hugo-coder/blob/main/docs/configurations.md). The [`config.toml`](https://github.com/luizdepra/hugo-coder/blob/master/exampleSite/config.toml) inside the [`exampleSite`](https://github.com/luizdepra/hugo-coder/tree/master/exampleSite) is also a good reference. -3. Build your site with `hugo server` and see the result at `http://localhost:1313/`. - -## Documentation - -See the [`docs`](docs/home.md) folder. - -## License - -Coder is licensed under the [MIT license](https://github.com/luizdepra/hugo-coder/blob/master/LICENSE.md). - -## Maintenance - -This theme is maintained by its author [Luiz de Prá](https://github.com/luizdepra) with the help from these awesome [contributors](CONTRIBUTORS.md). - -## Sponsoring - -If you like my project or it was useful for you, consider supporting its development. Just: - -Buy Me A Coffee - -## Special Thanks - -- Gleen McComb, for his great [article](https://glennmccomb.com/articles/how-to-build-custom-hugo-pagination/) about custom pagination. -- All contributors, for every PR and Issue reported. diff --git a/themes/hugo-coder/archetypes/default.md b/themes/hugo-coder/archetypes/default.md deleted file mode 100644 index c2de8e47..00000000 --- a/themes/hugo-coder/archetypes/default.md +++ /dev/null @@ -1,6 +0,0 @@ -+++ -draft = true -date = {{ .Date }} -title = "" -slug = "" -+++ diff --git a/themes/hugo-coder/archetypes/posts.md b/themes/hugo-coder/archetypes/posts.md deleted file mode 100644 index 76242f81..00000000 --- a/themes/hugo-coder/archetypes/posts.md +++ /dev/null @@ -1,12 +0,0 @@ -+++ -draft = true -date = {{ .Date }} -title = "" -description = "" -slug = "" -authors = [] -tags = [] -categories = [] -externalLink = "" -series = [] -+++ diff --git a/themes/hugo-coder/assets/js/coder.js b/themes/hugo-coder/assets/js/coder.js deleted file mode 100644 index 9b46cfc3..00000000 --- a/themes/hugo-coder/assets/js/coder.js +++ /dev/null @@ -1,97 +0,0 @@ -const body = document.body; -const darkModeToggle = document.getElementById('dark-mode-toggle'); -const darkModeMediaQuery = window.matchMedia('(prefers-color-scheme: dark)'); - -// Check if user preference is set, if not check value of body class for light or dark else it means that colorscheme = auto -if (localStorage.getItem("colorscheme")) { - setTheme(localStorage.getItem("colorscheme")); -} else if (body.classList.contains('colorscheme-light') || body.classList.contains('colorscheme-dark')) { - setTheme(body.classList.contains("colorscheme-dark") ? "dark" : "light"); -} else { - setTheme(darkModeMediaQuery.matches ? "dark" : "light"); -} - -if (darkModeToggle) { - darkModeToggle.addEventListener('click', () => { - let theme = body.classList.contains("colorscheme-dark") ? "light" : "dark"; - setTheme(theme); - rememberTheme(theme); - }); -} - -darkModeMediaQuery.addListener((event) => { - setTheme(event.matches ? 
"dark" : "light"); -}); - -document.addEventListener("DOMContentLoaded", function () { - let node = document.querySelector('.preload-transitions'); - node.classList.remove('preload-transitions'); -}); - -function setTheme(theme) { - body.classList.remove('colorscheme-auto'); - let inverse = theme === 'dark' ? 'light' : 'dark'; - body.classList.remove('colorscheme-' + inverse); - body.classList.add('colorscheme-' + theme); - document.documentElement.style['color-scheme'] = theme; - - function waitForElm(selector) { - return new Promise(resolve => { - if (document.querySelector(selector)) { - return resolve(document.querySelector(selector)); - } - - const observer = new MutationObserver(mutations => { - if (document.querySelector(selector)) { - resolve(document.querySelector(selector)); - observer.disconnect(); - } - }); - - observer.observe(document.body, { - childList: true, - subtree: true - }); - }); - } - - if (theme === 'dark') { - const message = { - type: 'set-theme', - theme: 'github-dark' - }; - waitForElm('.utterances-frame').then((iframe) => { - iframe.contentWindow.postMessage(message, 'https://utteranc.es'); - }) - - } - else { - const message = { - type: 'set-theme', - theme: 'github-light' - }; - waitForElm('.utterances-frame').then((iframe) => { - iframe.contentWindow.postMessage(message, 'https://utteranc.es'); - }) - - } - - function sendMessage(message) { - const iframe = document.querySelector('iframe.giscus-frame'); - if (!iframe) return; - iframe.contentWindow.postMessage({ giscus: message }, 'https://giscus.app'); - } - sendMessage({ - setConfig: { - theme: theme, - }, - }); - - // Create and send event - const event = new Event('themeChanged'); - document.dispatchEvent(event); -} - -function rememberTheme(theme) { - localStorage.setItem('colorscheme', theme); -} diff --git a/themes/hugo-coder/assets/scss/_base.scss b/themes/hugo-coder/assets/scss/_base.scss deleted file mode 100644 index 2b7d7e49..00000000 --- a/themes/hugo-coder/assets/scss/_base.scss +++ /dev/null @@ -1,281 +0,0 @@ -*, -*:after, -*:before { - box-sizing: inherit; -} - -html { - box-sizing: border-box; - font-size: 62.5%; -} - -body { - color: $fg-color; - background-color: $bg-color; - font-family: $font-family; - font-size: 1.8em; - font-weight: 400; - line-height: 1.8em; - - @media only screen and (max-width: 768px) { - font-size: 1.6em; - line-height: 1.6em; - } -} - -iframe[src*=disqus] { - color-scheme: light; -} - -a { - font-weight: 500; - color: $link-color; - text-decoration: none; - transition: all 0.25s ease-in; - - &:focus, - &:hover { - text-decoration: underline; - } -} - -p { - margin: 2rem 0 2rem 0; -} - -h1, -h2, -h3, -h4, -h5, -h6 { - font-family: $font-family; - font-weight: 600; - color: $alt-fg-color; - margin: 4rem 0 2.5rem 0; - - &:hover .heading-link { - visibility: visible; - } - - .heading-link { - color: $link-color; - font-weight: inherit; - text-decoration: none; - font-size: 80%; - visibility: hidden; - } - - .title-link { - color: inherit; - font-weight: inherit; - text-decoration: none; - } -} - -h1 { - font-size: 3.2rem; - line-height: 3.6rem; - - @media only screen and (max-width: 768px) { - font-size: 3rem; - line-height: 3.4rem; - } -} - -h2 { - font-size: 2.8rem; - line-height: 3.2rem; - - @media only screen and (max-width: 768px) { - font-size: 2.6rem; - line-height: 3rem; - } -} - -h3 { - font-size: 2.4rem; - line-height: 2.8rem; - - @media only screen and (max-width: 768px) { - font-size: 2.2rem; - line-height: 2.6rem; - } -} - -h4 { - font-size: 2.2rem; - 
line-height: 2.6rem; - - @media only screen and (max-width: 768px) { - font-size: 2rem; - line-height: 2.4rem; - } -} - -h5 { - font-size: 2rem; - line-height: 2.4rem; - - @media only screen and (max-width: 768px) { - font-size: 1.8rem; - line-height: 2.2rem; - } -} - -h6 { - font-size: 1.8rem; - line-height: 2.2rem; - - @media only screen and (max-width: 768px) { - font-size: 1.6rem; - line-height: 2rem; - } -} - -b, -strong { - font-weight: 700; -} - -.highlight { - - div, - pre { - margin: 2rem 0 2rem; - padding: 1rem; - border-radius: 1rem; - } -} - -pre { - display: block; - font-family: $code-font-family; - font-size: 1.6rem; - font-weight: 400; - line-height: 2.6rem; - overflow-x: auto; - margin: 2rem 0 2rem; - padding: 1rem; - border-radius: 1rem; - - code { - display: inline-block; - background-color: inherit; - color: inherit; - } -} - -code { - font-family: $code-font-family; - font-size: 1.6rem; - font-weight: 400; - border-radius: 0.6rem; - padding: 0.3rem 0.6rem; - background-color: $darker-alt-bg-color; - color: $fg-color; -} - -blockquote { - border-left: 2px solid $alt-bg-color; - padding-left: 2rem; - line-height: 2.2rem; - font-weight: 400; - font-style: italic; -} - -th, -td { - padding: 1.6rem; -} - -table { - border-collapse: collapse; -} - -table td, -table th { - border: 2px solid $alt-fg-color; -} - -table tr:first-child th { - border-top: 0; -} - -table tr:last-child td { - border-bottom: 0; -} - -table tr td:first-child, -table tr th:first-child { - border-left: 0; -} - -table tr td:last-child, -table tr th:last-child { - border-right: 0; -} - -img { - width: 100%; -} - -figure { - text-align: center; -} - -.footnotes { - ol li p { - margin: 0; - } -} - -.preload-transitions * { - $null-transition: none !important; - - -webkit-transition: $null-transition; - -moz-transition: $null-transition; - -ms-transition: $null-transition; - -o-transition: $null-transition; - transition: $null-transition; -} - -.wrapper { - display: flex; - flex-direction: column; - - min-height: 100vh; - width: 100%; -} - -.container { - margin: 1rem auto; - max-width: 90rem; - width: 100%; - padding-left: 2rem; - padding-right: 2rem; -} - -.fab { - font-weight: 400; -} - -.fas { - font-weight: 700; -} - -.float-right { - float: right; -} - -.float-left { - float: left; -} - -.fab { - font-weight: 400; -} - -.fas { - font-weight: 900; -} \ No newline at end of file diff --git a/themes/hugo-coder/assets/scss/_base_dark.scss b/themes/hugo-coder/assets/scss/_base_dark.scss deleted file mode 100644 index 4a1490d7..00000000 --- a/themes/hugo-coder/assets/scss/_base_dark.scss +++ /dev/null @@ -1,91 +0,0 @@ -@mixin base_dark { - color: $fg-color-dark; - background-color: $bg-color-dark; - - a { - color: $link-color-dark; - } - - h1, - h2, - h3, - h4, - h5, - h6 { - color: $alt-fg-color-dark; - - &:hover .heading-link { - visibility: visible; - } - - .heading-link { - color: $link-color-dark; - font-weight: inherit; - text-decoration: none; - font-size: 80%; - visibility: hidden; - } - - .title-link { - color: inherit; - font-weight: inherit; - text-decoration: none; - } - } - - pre code { - background-color: inherit; - color: inherit; - } - - code { - background-color: $lighter-alt-bg-color-dark; - color: $fg-color-dark; - } - - blockquote { - border-left: 2px solid $alt-bg-color-dark; - } - - th, - td { - padding: 1.6rem; - } - - table { - border-collapse: collapse; - } - - table td, - table th { - border: 2px solid $alt-fg-color-dark; - } - - table tr:first-child th { - border-top: 0; - } - - 
table tr:last-child td { - border-bottom: 0; - } - - table tr td:first-child, - table tr th:first-child { - border-left: 0; - } - - table tr td:last-child, - table tr th:last-child { - border-right: 0; - } -} - -body.colorscheme-dark { - @include base_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include base_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_base_rtl.scss b/themes/hugo-coder/assets/scss/_base_rtl.scss deleted file mode 100644 index e237fd5d..00000000 --- a/themes/hugo-coder/assets/scss/_base_rtl.scss +++ /dev/null @@ -1,24 +0,0 @@ -body.rtl { - direction: rtl; - - pre { - direction: ltr; - } - - blockquote { - border-left: none; - border-right: 2px solid $alt-bg-color; - padding-left: 0; - padding-right: 1.6rem; - } - - table tr td:first-child, - table tr th:first-child { - border-right: 0; - } - - table tr td:last-child, - table tr th:last-child { - border-left: 0; - } -} diff --git a/themes/hugo-coder/assets/scss/_content.scss b/themes/hugo-coder/assets/scss/_content.scss deleted file mode 100644 index e6894573..00000000 --- a/themes/hugo-coder/assets/scss/_content.scss +++ /dev/null @@ -1,245 +0,0 @@ -.content { - flex: 1; - display: flex; - margin-top: 1.6rem; - margin-bottom: 3.2rem; - - header { - margin-top: 6.4rem; - margin-bottom: 3.2rem; - - h1 { - font-size: 4.2rem; - line-height: 4.6rem; - margin: 0; - - @media only screen and (max-width: 768px) { - font-size: 4rem; - line-height: 4.4rem; - } - } - } - - article { - /* - a:where(.external-link)::after { - @include fa-icon; - content: fa-content($fa-var-external-link); - padding-left: 0.5em; - font-size: 0.75em; - } - */ - details { - summary { - cursor: pointer; - } - } - - footer { - margin-top: 4rem; - - .see-also { - margin: 3.2rem 0; - - h3 { - margin: 3.2rem 0; - } - } - } - - p { - text-align: justify; - text-justify: auto; - hyphens: auto; - } - } - - .post { - .post-title { - margin-bottom: 0.75em; - } - - .post-meta { - i { - text-align: center; - width: 1.6rem; - margin-left: 0; - margin-right: 0.5rem; - } - - .date { - .posted-on { - margin-left: 0; - margin-right: 1.5rem; - } - } - - .tags { - .tag { - display: inline-block; - padding: 0.3rem 0.6rem; - background-color: $alt-bg-color; - border-radius: 0.6rem; - line-height: 1.4em; - - a { - color: $fg-color; - } - - a:active { - color: $fg-color; - } - } - } - } - } - - figure { - margin: 0; - padding: 0; - } - - figcaption p { - text-align: center; - font-style: italic; - font-size: 1.6rem; - margin: 0; - } -} - -.avatar img { - width: 20rem; - height: auto; - border-radius: 50%; - - @media only screen and (max-width: 768px) { - width: 10rem; - } -} - -.list { - ul { - margin: 3.2rem 0 3.2rem 0; - list-style: none; - padding: 0; - - li { - font-size: 1.8rem; - - @media only screen and (max-width: 768px) { - margin: 1.6rem 0 1.6rem 0; - } - - .date { - display: inline-block; - flex: 1; - width: 20rem; - text-align: right; - margin-right: 3rem; - - @media only screen and (max-width: 768px) { - display: block; - text-align: left; - } - } - - .title { - font-size: 1.8rem; - flex: 2; - color: $fg-color; - font-family: $font-family; - font-weight: 700; - - &:hover, - &:focus { - color: $link-color; - } - } - } - } - - ul:not(.pagination) { - li { - @media only screen and (min-width: 768.1px) { - display: flex; - } - } - } -} - -.centered { - display: flex; - align-items: center; - justify-content: center; - - .about { - text-align: center; - - h1 { - margin-top: 2rem; - margin-bottom: 0.5rem; - } - - h2 { - 
margin-top: 1rem; - margin-bottom: 0.5rem; - font-size: 2.4rem; - - @media only screen and (max-width: 768px) { - font-size: 2rem; - } - } - - ul { - list-style: none; - margin: 3rem 0 1rem 0; - padding: 0; - - li { - display: inline-block; - position: relative; - - a { - color: $fg-color; - text-transform: uppercase; - margin-left: 1rem; - margin-right: 1rem; - font-size: 1.6rem; - - &:hover, - &:focus { - color: $link-color; - } - - @media only screen and (max-width: 768px) { - font-size: 1.4rem; - } - } - } - } - } - - .error { - text-align: center; - - h1 { - margin-top: 2rem; - margin-bottom: 0.5rem; - font-size: 4.6rem; - - @media only screen and (max-width: 768px) { - font-size: 3.2rem; - } - } - - h2 { - margin-top: 2rem; - margin-bottom: 3.2rem; - font-size: 3.2rem; - - @media only screen and (max-width: 768px) { - font-size: 2.8rem; - } - } - } -} diff --git a/themes/hugo-coder/assets/scss/_content_dark.scss b/themes/hugo-coder/assets/scss/_content_dark.scss deleted file mode 100644 index c4b961af..00000000 --- a/themes/hugo-coder/assets/scss/_content_dark.scss +++ /dev/null @@ -1,59 +0,0 @@ -@mixin content_dark { - .content { - .post { - .tags { - .tag { - background-color: $alt-bg-color-dark; - - a { - color: $fg-color-dark; - } - a:active { - color: $fg-color-dark; - } - } - } - } - .list { - ul { - li { - .title { - color: $fg-color-dark; - - &:hover, - &:focus { - color: $link-color-dark; - } - } - } - } - } - - .centered { - .about { - ul { - li { - a { - color: $fg-color-dark; - - &:hover, - &:focus { - color: $link-color-dark; - } - } - } - } - } - } - } -} - -body.colorscheme-dark { - @include content_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include content_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_content_rtl.scss b/themes/hugo-coder/assets/scss/_content_rtl.scss deleted file mode 100644 index e3868f97..00000000 --- a/themes/hugo-coder/assets/scss/_content_rtl.scss +++ /dev/null @@ -1,36 +0,0 @@ -body.rtl { - .content { - .post { - .post-meta { - .posted-on { - margin-left: 1.5rem; - margin-right: 0; - } - } - - .tags, - .categories { - i { - margin-left: 0.5rem; - margin-right: 0; - } - } - } - } - - .list { - ul { - li { - .date { - text-align: left; - margin-left: 3rem; - margin-right: 0; - - @media only screen and (max-width: 768px) { - text-align: right; - } - } - } - } - } -} diff --git a/themes/hugo-coder/assets/scss/_float.scss b/themes/hugo-coder/assets/scss/_float.scss deleted file mode 100644 index 97cddc5c..00000000 --- a/themes/hugo-coder/assets/scss/_float.scss +++ /dev/null @@ -1,38 +0,0 @@ -.float-container { - bottom: 2rem; - right: 2rem; - z-index: 100; - position: fixed; - font-size: 1.6em; - - a { - position: relative; - display: inline-block; - width: 3rem; - height: 3rem; - font-size: 2rem; - color: $alt-fg-color; - background-color: $alt-bg-color; - border-radius: 0.2rem; - opacity: 0.5; - transition: all 0.25s ease-in; - - &:hover, - &:focus { - color: $link-color; - opacity: 1; - - @media only screen and (max-width: 768px) { - color: $alt-fg-color; - opacity: 0.5; - } - } - - i { - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - } - } -} diff --git a/themes/hugo-coder/assets/scss/_float_dark.scss b/themes/hugo-coder/assets/scss/_float_dark.scss deleted file mode 100644 index 348dafe5..00000000 --- a/themes/hugo-coder/assets/scss/_float_dark.scss +++ /dev/null @@ -1,27 +0,0 @@ -@mixin float_dark { - .float-container { - a { - color: $alt-fg-color-dark; - 
background-color: $alt-bg-color-dark; - - &:hover, - &:focus { - color: $link-color-dark; - - @media only screen and (max-width: 768px) { - color: $alt-fg-color-dark; - } - } - } - } -} - -body.colorscheme-dark { - @include float_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include float_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_footer.scss b/themes/hugo-coder/assets/scss/_footer.scss deleted file mode 100644 index 10b0c0f7..00000000 --- a/themes/hugo-coder/assets/scss/_footer.scss +++ /dev/null @@ -1,11 +0,0 @@ -.footer { - width: 100%; - text-align: center; - font-size: 1.6rem; - line-height: 2rem; - margin-bottom: 1rem; - - a { - color: $link-color; - } -} diff --git a/themes/hugo-coder/assets/scss/_footer_dark.scss b/themes/hugo-coder/assets/scss/_footer_dark.scss deleted file mode 100644 index dd02be8b..00000000 --- a/themes/hugo-coder/assets/scss/_footer_dark.scss +++ /dev/null @@ -1,17 +0,0 @@ -@mixin footer_dark { - .footer { - a { - color: $link-color-dark; - } - } -} - -body.colorscheme-dark { - @include footer_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include footer_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_navigation.scss b/themes/hugo-coder/assets/scss/_navigation.scss deleted file mode 100644 index 85e43b7e..00000000 --- a/themes/hugo-coder/assets/scss/_navigation.scss +++ /dev/null @@ -1,152 +0,0 @@ -.navigation { - height: 6rem; - width: 100%; - - a, - span { - display: inline; - font-size: 1.7rem; - font-family: $font-family; - font-weight: 600; - color: $fg-color; - } - - a { - - &:hover, - &:focus { - color: $link-color; - } - } - - .navigation-title { - letter-spacing: 0.1rem; - text-transform: uppercase; - } - - .navigation-list { - float: right; - list-style: none; - margin-bottom: 0; - margin-top: 0; - - @media only screen and (max-width: 768px) { - position: relative; - top: 2rem; - right: 0; - z-index: 5; - visibility: hidden; - opacity: 0; - padding: 0; - max-height: 0; - width: 100%; - background-color: $bg-color; - border-top: solid 2px $alt-bg-color; - border-bottom: solid 2px $alt-bg-color; - transition: opacity 0.25s, max-height 0.15s linear; - } - - .navigation-item { - float: left; - margin: 0; - position: relative; - - @media only screen and (max-width: 768px) { - float: none !important; - text-align: center; - - a, - span { - line-height: 5rem; - } - } - - a, - span { - margin-left: 1rem; - margin-right: 1rem; - } - } - - .separator { - @media only screen and (max-width: 768px) { - display: none; - } - } - - .menu-separator { - @media only screen and (max-width: 768px) { - border-top: 2px solid $fg-color; - margin: 0 8rem; - - span { - display: none; - } - } - } - } - - #dark-mode-toggle { - margin: 1.7rem 0; - font-size: 2.4rem; - line-height: inherit; - bottom: 2rem; - left: 2rem; - z-index: 100; - position: fixed; - } - - #menu-toggle { - display: none; - - @media only screen and (max-width: 768px) { - display: initial; - position: relative; - left: -99999px; - opacity: 0; - - &:checked+label>i { - color: $alt-bg-color; - } - - &:checked+label+ul { - visibility: visible; - opacity: 1; - max-height: 100rem; - } - - &:focus-visible+label { - outline-style: auto; - } - } - } - - .menu-button { - display: none; - - @media only screen and (max-width: 768px) { - position: relative; - display: block; - font-size: 2.4rem; - font-weight: 400; - } - - i { - - &:hover, - &:focus { - color: $alt-fg-color; - } - } - } - - i { - color: $fg-color; - cursor: 
pointer; - - &:hover, - &:focus { - color: $link-color; - } - } -} diff --git a/themes/hugo-coder/assets/scss/_navigation_dark.scss b/themes/hugo-coder/assets/scss/_navigation_dark.scss deleted file mode 100644 index 4cbd5548..00000000 --- a/themes/hugo-coder/assets/scss/_navigation_dark.scss +++ /dev/null @@ -1,68 +0,0 @@ -@mixin navigation_dark { - .navigation { - - a, - span { - color: $fg-color-dark; - } - - a { - - &:hover, - &:focus { - color: $link-color-dark; - } - } - - .navigation-list { - @media only screen and (max-width: 768px) { - background-color: $bg-color-dark; - border-top: solid 2px $alt-bg-color-dark; - border-bottom: solid 2px $alt-bg-color-dark; - } - - .menu-separator { - @media only screen and (max-width: 768px) { - border-top: 2px solid $fg-color-dark; - } - } - } - - #menu-toggle { - @media only screen and (max-width: 768px) { - &:checked+label>i { - color: $alt-bg-color-dark; - } - } - } - - i { - color: $fg-color-dark; - - &:hover, - &:focus { - color: $link-color-dark; - } - } - - .menu-button { - i { - - &:hover, - &:focus { - color: $alt-fg-color-dark; - } - } - } - } -} - -body.colorscheme-dark { - @include navigation_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include navigation_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_navigation_rtl.scss b/themes/hugo-coder/assets/scss/_navigation_rtl.scss deleted file mode 100644 index 475c9328..00000000 --- a/themes/hugo-coder/assets/scss/_navigation_rtl.scss +++ /dev/null @@ -1,20 +0,0 @@ -body.rtl { - .navigation-list { - float: left; - - @media only screen and (max-width: 768px) { - left: 0; - right: auto; - } - - .navigation-item { - float: right; - } - } - - .menu-button { - @media only screen and (max-width: 768px) { - float: left; - } - } -} diff --git a/themes/hugo-coder/assets/scss/_notices.scss b/themes/hugo-coder/assets/scss/_notices.scss deleted file mode 100644 index 1b3a5e77..00000000 --- a/themes/hugo-coder/assets/scss/_notices.scss +++ /dev/null @@ -1,111 +0,0 @@ -.notice { - border-radius: 0.2rem; - position: relative; - margin: 2rem 0; - padding: 0 0.75rem; - overflow: auto; - - .notice-title { - position: relative; - font-weight: 700; - margin: 0 -0.75rem; - padding: 0.2rem 3.5rem; - border-bottom: 1px solid $bg-color; - - i { - position: absolute; - top: 50%; - left: 1.8rem; - transform: translate(-50%, -50%); - } - } - - .notice-content { - display: block; - margin: 2rem 2rem; - } -} - -.notice.note { - background-color: $bg-color-notice-note-content; - - .notice-title { - background-color: $bg-color-notice-note-title; - - i { - color: $fg-color-notice-note-icon; - } - } -} - -.notice.tip { - background-color: $bg-color-notice-tip-content; - - .notice-title { - background-color: $bg-color-notice-tip-title; - - i { - color: $fg-color-notice-tip-icon; - } - } -} - -.notice.example { - background-color: $bg-color-notice-example-content; - - .notice-title { - background-color: $bg-color-notice-example-title; - - i { - color: $fg-color-notice-example-icon; - } - } -} - -.notice.question { - background-color: $bg-color-notice-question-content; - - .notice-title { - background-color: $bg-color-notice-question-title; - - i { - color: $fg-color-notice-question-icon; - } - } -} - -.notice.info { - background-color: $bg-color-notice-info-content; - - .notice-title { - background-color: $bg-color-notice-info-title; - - i { - color: $fg-color-notice-info-icon; - } - } -} - -.notice.warning { - background-color: $bg-color-notice-warning-content; - - 
.notice-title { - background-color: $bg-color-notice-warning-title; - - i { - color: $fg-color-notice-warning-icon; - } - } -} - -.notice.error { - background-color: $bg-color-notice-error-content; - - .notice-title { - background-color: $bg-color-notice-error-title; - - i { - color: $fg-color-notice-error-icon; - } - } -} diff --git a/themes/hugo-coder/assets/scss/_notices_dark.scss b/themes/hugo-coder/assets/scss/_notices_dark.scss deleted file mode 100644 index 00d3f3a7..00000000 --- a/themes/hugo-coder/assets/scss/_notices_dark.scss +++ /dev/null @@ -1,17 +0,0 @@ -@mixin notices_dark { - .notice { - .notice-title { - border-bottom: 1px solid $bg-color-dark; - } - } -} - -body.colorscheme-dark { - @include notices_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include notices_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_pagination.scss b/themes/hugo-coder/assets/scss/_pagination.scss deleted file mode 100644 index 33214bbd..00000000 --- a/themes/hugo-coder/assets/scss/_pagination.scss +++ /dev/null @@ -1,27 +0,0 @@ -.pagination { - margin-top: 6rem; - text-align: center; - font-family: $font-family; - - li { - display: inline; - text-align: center; - font-weight: 700; - - span { - margin: 0; - text-align: center; - width: 3.2rem; - } - - a { - font-weight: 300; - - span { - margin: 0; - text-align: center; - width: 3.2rem; - } - } - } -} diff --git a/themes/hugo-coder/assets/scss/_syntax.scss b/themes/hugo-coder/assets/scss/_syntax.scss deleted file mode 100644 index e5121ac4..00000000 --- a/themes/hugo-coder/assets/scss/_syntax.scss +++ /dev/null @@ -1,86 +0,0 @@ -/* Background */ .bg { background-color: #ffffff; } -/* PreWrapper */ .chroma { background-color: #ffffff; } -/* Other */ .chroma .x { } -/* Error */ .chroma .err { color: #a61717; background-color: #e3d2d2 } -/* CodeLine */ .chroma .cl { } -/* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit } -/* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; } -/* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; } -/* LineHighlight */ .chroma .hl { background-color: #ffffcc } -/* LineNumbersTable */ .chroma .lnt { white-space: pre; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f } -/* LineNumbers */ .chroma .ln { white-space: pre; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f } -/* Line */ .chroma .line { display: flex; } -/* Keyword */ .chroma .k { color: #000000; font-weight: bold } -/* KeywordConstant */ .chroma .kc { color: #000000; font-weight: bold } -/* KeywordDeclaration */ .chroma .kd { color: #000000; font-weight: bold } -/* KeywordNamespace */ .chroma .kn { color: #000000; font-weight: bold } -/* KeywordPseudo */ .chroma .kp { color: #000000; font-weight: bold } -/* KeywordReserved */ .chroma .kr { color: #000000; font-weight: bold } -/* KeywordType */ .chroma .kt { color: #445588; font-weight: bold } -/* Name */ .chroma .n { } -/* NameAttribute */ .chroma .na { color: #008080 } -/* NameBuiltin */ .chroma .nb { color: #0086b3 } -/* NameBuiltinPseudo */ .chroma .bp { color: #999999 } -/* NameClass */ .chroma .nc { color: #445588; font-weight: bold } -/* NameConstant */ .chroma .no { color: #008080 } -/* NameDecorator */ .chroma .nd { color: #3c5d5d; font-weight: bold } -/* NameEntity */ .chroma .ni { color: #800080 } -/* NameException */ .chroma .ne { color: #990000; font-weight: bold } -/* NameFunction */ 
.chroma .nf { color: #990000; font-weight: bold } -/* NameFunctionMagic */ .chroma .fm { } -/* NameLabel */ .chroma .nl { color: #990000; font-weight: bold } -/* NameNamespace */ .chroma .nn { color: #555555 } -/* NameOther */ .chroma .nx { } -/* NameProperty */ .chroma .py { } -/* NameTag */ .chroma .nt { color: #000080 } -/* NameVariable */ .chroma .nv { color: #008080 } -/* NameVariableClass */ .chroma .vc { color: #008080 } -/* NameVariableGlobal */ .chroma .vg { color: #008080 } -/* NameVariableInstance */ .chroma .vi { color: #008080 } -/* NameVariableMagic */ .chroma .vm { } -/* Literal */ .chroma .l { } -/* LiteralDate */ .chroma .ld { } -/* LiteralString */ .chroma .s { color: #dd1144 } -/* LiteralStringAffix */ .chroma .sa { color: #dd1144 } -/* LiteralStringBacktick */ .chroma .sb { color: #dd1144 } -/* LiteralStringChar */ .chroma .sc { color: #dd1144 } -/* LiteralStringDelimiter */ .chroma .dl { color: #dd1144 } -/* LiteralStringDoc */ .chroma .sd { color: #dd1144 } -/* LiteralStringDouble */ .chroma .s2 { color: #dd1144 } -/* LiteralStringEscape */ .chroma .se { color: #dd1144 } -/* LiteralStringHeredoc */ .chroma .sh { color: #dd1144 } -/* LiteralStringInterpol */ .chroma .si { color: #dd1144 } -/* LiteralStringOther */ .chroma .sx { color: #dd1144 } -/* LiteralStringRegex */ .chroma .sr { color: #009926 } -/* LiteralStringSingle */ .chroma .s1 { color: #dd1144 } -/* LiteralStringSymbol */ .chroma .ss { color: #990073 } -/* LiteralNumber */ .chroma .m { color: #009999 } -/* LiteralNumberBin */ .chroma .mb { color: #009999 } -/* LiteralNumberFloat */ .chroma .mf { color: #009999 } -/* LiteralNumberHex */ .chroma .mh { color: #009999 } -/* LiteralNumberInteger */ .chroma .mi { color: #009999 } -/* LiteralNumberIntegerLong */ .chroma .il { color: #009999 } -/* LiteralNumberOct */ .chroma .mo { color: #009999 } -/* Operator */ .chroma .o { color: #000000; font-weight: bold } -/* OperatorWord */ .chroma .ow { color: #000000; font-weight: bold } -/* Punctuation */ .chroma .p { } -/* Comment */ .chroma .c { color: #999988; font-style: italic } -/* CommentHashbang */ .chroma .ch { color: #999988; font-style: italic } -/* CommentMultiline */ .chroma .cm { color: #999988; font-style: italic } -/* CommentSingle */ .chroma .c1 { color: #999988; font-style: italic } -/* CommentSpecial */ .chroma .cs { color: #999999; font-weight: bold; font-style: italic } -/* CommentPreproc */ .chroma .cp { color: #999999; font-weight: bold; font-style: italic } -/* CommentPreprocFile */ .chroma .cpf { color: #999999; font-weight: bold; font-style: italic } -/* Generic */ .chroma .g { } -/* GenericDeleted */ .chroma .gd { color: #000000; background-color: #ffdddd } -/* GenericEmph */ .chroma .ge { color: #000000; font-style: italic } -/* GenericError */ .chroma .gr { color: #aa0000 } -/* GenericHeading */ .chroma .gh { color: #999999 } -/* GenericInserted */ .chroma .gi { color: #000000; background-color: #ddffdd } -/* GenericOutput */ .chroma .go { color: #888888 } -/* GenericPrompt */ .chroma .gp { color: #555555 } -/* GenericStrong */ .chroma .gs { font-weight: bold } -/* GenericSubheading */ .chroma .gu { color: #aaaaaa } -/* GenericTraceback */ .chroma .gt { color: #aa0000 } -/* GenericUnderline */ .chroma .gl { text-decoration: underline } -/* TextWhitespace */ .chroma .w { color: #bbbbbb } diff --git a/themes/hugo-coder/assets/scss/_syntax_dark.scss b/themes/hugo-coder/assets/scss/_syntax_dark.scss deleted file mode 100644 index b95004a5..00000000 --- 
a/themes/hugo-coder/assets/scss/_syntax_dark.scss +++ /dev/null @@ -1,98 +0,0 @@ -@mixin syntax_dark { - /* Background */ .bg { color: #c9d1d9; background-color: #0d1117; } - /* PreWrapper */ .chroma { color: #c9d1d9; background-color: #0d1117; } - /* Other */ .chroma .x { } - /* Error */ .chroma .err { color: #f85149 } - /* CodeLine */ .chroma .cl { } - /* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit } - /* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; } - /* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; } - /* LineHighlight */ .chroma .hl { background-color: #ffffcc } - /* LineNumbersTable */ .chroma .lnt { white-space: pre; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #64686c } - /* LineNumbers */ .chroma .ln { white-space: pre; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #6e7681 } - /* Line */ .chroma .line { display: flex; } - /* Keyword */ .chroma .k { color: #ff7b72 } - /* KeywordConstant */ .chroma .kc { color: #79c0ff } - /* KeywordDeclaration */ .chroma .kd { color: #ff7b72 } - /* KeywordNamespace */ .chroma .kn { color: #ff7b72 } - /* KeywordPseudo */ .chroma .kp { color: #79c0ff } - /* KeywordReserved */ .chroma .kr { color: #ff7b72 } - /* KeywordType */ .chroma .kt { color: #ff7b72 } - /* Name */ .chroma .n { } - /* NameAttribute */ .chroma .na { } - /* NameBuiltin */ .chroma .nb { } - /* NameBuiltinPseudo */ .chroma .bp { } - /* NameClass */ .chroma .nc { color: #f0883e; font-weight: bold } - /* NameConstant */ .chroma .no { color: #79c0ff; font-weight: bold } - /* NameDecorator */ .chroma .nd { color: #d2a8ff; font-weight: bold } - /* NameEntity */ .chroma .ni { color: #ffa657 } - /* NameException */ .chroma .ne { color: #f0883e; font-weight: bold } - /* NameFunction */ .chroma .nf { color: #d2a8ff; font-weight: bold } - /* NameFunctionMagic */ .chroma .fm { } - /* NameLabel */ .chroma .nl { color: #79c0ff; font-weight: bold } - /* NameNamespace */ .chroma .nn { color: #ff7b72 } - /* NameOther */ .chroma .nx { } - /* NameProperty */ .chroma .py { color: #79c0ff } - /* NameTag */ .chroma .nt { color: #7ee787 } - /* NameVariable */ .chroma .nv { color: #79c0ff } - /* NameVariableClass */ .chroma .vc { } - /* NameVariableGlobal */ .chroma .vg { } - /* NameVariableInstance */ .chroma .vi { } - /* NameVariableMagic */ .chroma .vm { } - /* Literal */ .chroma .l { color: #a5d6ff } - /* LiteralDate */ .chroma .ld { color: #79c0ff } - /* LiteralString */ .chroma .s { color: #a5d6ff } - /* LiteralStringAffix */ .chroma .sa { color: #79c0ff } - /* LiteralStringBacktick */ .chroma .sb { color: #a5d6ff } - /* LiteralStringChar */ .chroma .sc { color: #a5d6ff } - /* LiteralStringDelimiter */ .chroma .dl { color: #79c0ff } - /* LiteralStringDoc */ .chroma .sd { color: #a5d6ff } - /* LiteralStringDouble */ .chroma .s2 { color: #a5d6ff } - /* LiteralStringEscape */ .chroma .se { color: #79c0ff } - /* LiteralStringHeredoc */ .chroma .sh { color: #79c0ff } - /* LiteralStringInterpol */ .chroma .si { color: #a5d6ff } - /* LiteralStringOther */ .chroma .sx { color: #a5d6ff } - /* LiteralStringRegex */ .chroma .sr { color: #79c0ff } - /* LiteralStringSingle */ .chroma .s1 { color: #a5d6ff } - /* LiteralStringSymbol */ .chroma .ss { color: #a5d6ff } - /* LiteralNumber */ .chroma .m { color: #a5d6ff } - /* LiteralNumberBin */ .chroma .mb { color: #a5d6ff } - /* LiteralNumberFloat */ .chroma .mf { color: #a5d6ff } - /* 
LiteralNumberHex */ .chroma .mh { color: #a5d6ff } - /* LiteralNumberInteger */ .chroma .mi { color: #a5d6ff } - /* LiteralNumberIntegerLong */ .chroma .il { color: #a5d6ff } - /* LiteralNumberOct */ .chroma .mo { color: #a5d6ff } - /* Operator */ .chroma .o { color: #ff7b72; font-weight: bold } - /* OperatorWord */ .chroma .ow { color: #ff7b72; font-weight: bold } - /* Punctuation */ .chroma .p { } - /* Comment */ .chroma .c { color: #8b949e; font-style: italic } - /* CommentHashbang */ .chroma .ch { color: #8b949e; font-style: italic } - /* CommentMultiline */ .chroma .cm { color: #8b949e; font-style: italic } - /* CommentSingle */ .chroma .c1 { color: #8b949e; font-style: italic } - /* CommentSpecial */ .chroma .cs { color: #8b949e; font-weight: bold; font-style: italic } - /* CommentPreproc */ .chroma .cp { color: #8b949e; font-weight: bold; font-style: italic } - /* CommentPreprocFile */ .chroma .cpf { color: #8b949e; font-weight: bold; font-style: italic } - /* Generic */ .chroma .g { } - /* GenericDeleted */ .chroma .gd { color: #ffa198; background-color: #490202 } - /* GenericEmph */ .chroma .ge { font-style: italic } - /* GenericError */ .chroma .gr { color: #ffa198 } - /* GenericHeading */ .chroma .gh { color: #79c0ff; font-weight: bold } - /* GenericInserted */ .chroma .gi { color: #56d364; background-color: #0f5323 } - /* GenericOutput */ .chroma .go { color: #8b949e } - /* GenericPrompt */ .chroma .gp { color: #8b949e } - /* GenericStrong */ .chroma .gs { font-weight: bold } - /* GenericSubheading */ .chroma .gu { color: #79c0ff } - /* GenericTraceback */ .chroma .gt { color: #ff7b72 } - /* GenericUnderline */ .chroma .gl { text-decoration: underline } - /* TextWhitespace */ .chroma .w { color: #6e7681 } -} - -body.colorscheme-dark { - @include syntax_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include syntax_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_tabs.scss b/themes/hugo-coder/assets/scss/_tabs.scss deleted file mode 100644 index b8466c81..00000000 --- a/themes/hugo-coder/assets/scss/_tabs.scss +++ /dev/null @@ -1,77 +0,0 @@ -.tabs { - display: flex; - flex-wrap: wrap; - margin: 2rem 0 2rem 0; - position: relative; - - &.tabs-left { - justify-content: flex-start; - - label.tab-label { - margin-right: 0.5rem; - } - - .tab-content { - border-radius: 0px 4px 4px 4px; - } - } - - &.tabs-right { - justify-content: flex-end; - - label.tab-label { - margin-left: 0.5rem; - } - - .tab-content { - border-radius: 4px 0px 4px 4px; - } - } - - input.tab-input { - display: none; - } - - label.tab-label { - background-color: $alt-bg-color; - border-color: $darker-alt-bg-color; - border-radius: 4px 4px 0px 0px; - - border-style: solid; - border-bottom-style: hidden; - - border-width: 1px; - cursor: pointer; - display: inline-block; - order: 1; - padding: 0.3rem 0.6rem; - position: relative; - top: 1px; - user-select: none; - } - - input.tab-input:checked + label.tab-label { - background-color: $bg-color; - } - - .tab-content { - background-color: $bg-color; - border-color: $darker-alt-bg-color; - border-style: solid; - border-width: 1px; - display: none; - order: 2; - padding: 1rem; - width: 100%; - } - - &.tabs-code { - .tab-content { - padding: 0.5rem; - - pre { - margin: 0; - } - } - } -} diff --git a/themes/hugo-coder/assets/scss/_tabs_dark.scss b/themes/hugo-coder/assets/scss/_tabs_dark.scss deleted file mode 100644 index c5be40ed..00000000 --- a/themes/hugo-coder/assets/scss/_tabs_dark.scss +++ /dev/null @@ -1,27 +0,0 @@ -@mixin 
tabs_dark { - .tabs { - label.tab-label { - background-color: $alt-bg-color-dark; - border-color: $lighter-alt-bg-color-dark; - } - - input.tab-input:checked + label.tab-label { - background-color: $bg-color-dark; - } - - .tab-content { - background-color: $bg-color-dark; - border-color: $lighter-alt-bg-color-dark; - } - } -} - -body.colorscheme-dark { - @include tabs_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include tabs_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_taxonomies.scss b/themes/hugo-coder/assets/scss/_taxonomies.scss deleted file mode 100644 index d405c73a..00000000 --- a/themes/hugo-coder/assets/scss/_taxonomies.scss +++ /dev/null @@ -1,20 +0,0 @@ -.taxonomy { - li { - display: inline-block; - margin: 0.9rem; - } - - .taxonomy-element { - display: block; - padding: 0.3rem 0.9rem; - background-color: $alt-bg-color; - border-radius: 0.6rem; - - a { - color: $fg-color; - } - a:active { - color: $fg-color; - } - } -} diff --git a/themes/hugo-coder/assets/scss/_taxonomies_dark.scss b/themes/hugo-coder/assets/scss/_taxonomies_dark.scss deleted file mode 100644 index 77c734db..00000000 --- a/themes/hugo-coder/assets/scss/_taxonomies_dark.scss +++ /dev/null @@ -1,22 +0,0 @@ -@mixin taxonomy_dark { - .taxonomy-element { - background-color: $alt-bg-color-dark; - - a { - color: $fg-color-dark; - } - a:active { - color: $fg-color-dark; - } - } -} - -body.colorscheme-dark { - @include taxonomy_dark(); -} - -body.colorscheme-auto { - @media (prefers-color-scheme: dark) { - @include taxonomy_dark(); - } -} diff --git a/themes/hugo-coder/assets/scss/_variables.scss b/themes/hugo-coder/assets/scss/_variables.scss deleted file mode 100644 index ef4804d6..00000000 --- a/themes/hugo-coder/assets/scss/_variables.scss +++ /dev/null @@ -1,73 +0,0 @@ -@mixin font($font-family, $font-file, $font-weight, $font-style) { - @font-face { - font-family: $font-family; - src: url($font-file+'.ttf') format('truetype'); - font-weight: $font-weight; - font-style: $font-weight; - } - } - - @include font('Baskerville', '/fonts/Libre_Baskerville/LibreBaskerville-Regular', "regular", "regular"); - @include font('Baskerville-Bold', '/fonts/Libre_Baskerville/LibreBaskerville-Bold', "bold", "regular"); - @include font('Baskerville-Italic', '/fonts/Libre_Baskerville/LibreBaskerville-Italic', "regular", "italic"); - - -// Fonts -$font-family: -apple-system, -"Baskerville", -BlinkMacSystemFont, -"Segoe UI", -Cantarell, -Helvetica, -"游ゴシック", -"PingFang SC", -STXihei,"华文细黑", -"Microsoft YaHei","微软雅黑", -SimSun,"宋体", -Heiti,"黑体", -sans-serif; - -$code-font-family: SFMono-Regular, -Consolas, -Liberation Mono, -Menlo, -monospace; - -// Colors -$bg-color: #ffffff !default; -$fg-color: #212121 !default; -$alt-bg-color: #e0e0e0 !default; -$alt-fg-color: #000 !default; -$darker-alt-bg-color: #ccc !default; -$link-color: #1565c0 !default; - -// Dark colors -$bg-color-dark: #212121 !default; -$fg-color-dark: #dadada !default; -$alt-bg-color-dark: #424242 !default; -$alt-fg-color-dark: #dadada !default; -$lighter-alt-bg-color-dark: #4f4f4f !default; -$link-color-dark: #42a5f5 !default; - -// Notice colors -$fg-color-notice-note-icon: #5e35b1 !default; -$bg-color-notice-note-title: #673ab71a !default; -$bg-color-notice-note-content: #7e57c21a !default; -$fg-color-notice-tip-icon: #00897b !default; -$bg-color-notice-tip-title: #0096881a !default; -$bg-color-notice-tip-content: #26a69a1a !default; -$fg-color-notice-example-icon: #6d4c41 !default; -$bg-color-notice-example-title: 
#7955481a !default; -$bg-color-notice-example-content: #8d6e631a !default; -$fg-color-notice-question-icon: #7cb342 !default; -$bg-color-notice-question-title: #8bc34a1a !default; -$bg-color-notice-question-content: #9ccc651a !default; -$fg-color-notice-info-icon: #1e88e5 !default; -$bg-color-notice-info-title: #2196f31a !default; -$bg-color-notice-info-content: #42a5f51a !default; -$fg-color-notice-warning-icon: #ffb300 !default; -$bg-color-notice-warning-title: #ffc1071a !default; -$bg-color-notice-warning-content: #ffca281a !default; -$fg-color-notice-error-icon: #e53935 !default; -$bg-color-notice-error-title: #f443361a !default; -$bg-color-notice-error-content: #ef53501a !default; diff --git a/themes/hugo-coder/assets/scss/coder-dark.scss b/themes/hugo-coder/assets/scss/coder-dark.scss deleted file mode 100644 index c05c5f50..00000000 --- a/themes/hugo-coder/assets/scss/coder-dark.scss +++ /dev/null @@ -1,10 +0,0 @@ -@import "variables"; -@import "base_dark"; -@import "content_dark"; -@import "notices_dark"; -@import "navigation_dark"; -@import "tabs_dark"; -@import "taxonomies_dark"; -@import "footer_dark"; -@import "float_dark"; -@import "syntax_dark"; diff --git a/themes/hugo-coder/assets/scss/coder-rtl.scss b/themes/hugo-coder/assets/scss/coder-rtl.scss deleted file mode 100644 index c65ad5c9..00000000 --- a/themes/hugo-coder/assets/scss/coder-rtl.scss +++ /dev/null @@ -1,4 +0,0 @@ -@import "_variables"; -@import "_base_rtl"; -@import "_content_rtl"; -@import "_navigation_rtl"; diff --git a/themes/hugo-coder/assets/scss/coder.scss b/themes/hugo-coder/assets/scss/coder.scss deleted file mode 100644 index e601683f..00000000 --- a/themes/hugo-coder/assets/scss/coder.scss +++ /dev/null @@ -1,13 +0,0 @@ -@import "css/normalize"; -@import "fork-awesome/fork-awesome"; -@import "variables"; -@import "base"; -@import "content"; -@import "notices"; -@import "navigation"; -@import "pagination"; -@import "tabs"; -@import "taxonomies"; -@import "footer"; -@import "float"; -@import "syntax"; diff --git a/themes/hugo-coder/assets/scss/css/normalize.css b/themes/hugo-coder/assets/scss/css/normalize.css deleted file mode 100644 index 8d6f3ff1..00000000 --- a/themes/hugo-coder/assets/scss/css/normalize.css +++ /dev/null @@ -1,350 +0,0 @@ -/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */ - -/* Document - ========================================================================== */ - -/** - * 1. Correct the line height in all browsers. - * 2. Prevent adjustments of font size after orientation changes in iOS. - */ - - html { - line-height: 1.15; /* 1 */ - -webkit-text-size-adjust: 100%; /* 2 */ - } - - /* Sections - ========================================================================== */ - - /** - * Remove the margin in all browsers. - */ - - body { - margin: 0; - } - - /** - * Render the `main` element consistently in IE. - */ - - main { - display: block; - } - - /** - * Correct the font size and margin on `h1` elements within `section` and - * `article` contexts in Chrome, Firefox, and Safari. - */ - - h1 { - font-size: 2em; - margin: 0.67em 0; - } - - /* Grouping content - ========================================================================== */ - - /** - * 1. Add the correct box sizing in Firefox. - * 2. Show the overflow in Edge and IE. - */ - - hr { - box-sizing: content-box; /* 1 */ - height: 0; /* 1 */ - overflow: visible; /* 2 */ - } - - /** - * 1. Correct the inheritance and scaling of font size in all browsers. - * 2. 
Correct the odd `em` font sizing in all browsers. - */ - - pre { - font-family: monospace, monospace; /* 1 */ - font-size: 1em; /* 2 */ - } - - /* Text-level semantics - ========================================================================== */ - - /** - * Remove the gray background on active links in IE 10. - */ - - a { - background-color: transparent; - word-wrap: break-word; - } - - /** - * 1. Remove the bottom border in Chrome 57- - * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari. - */ - - abbr[title] { - border-bottom: none; /* 1 */ - text-decoration: underline; /* 2 */ - text-decoration: underline dotted; /* 2 */ - } - - /** - * Add the correct font weight in Chrome, Edge, and Safari. - */ - - b, - strong { - font-weight: bolder; - } - - /** - * 1. Correct the inheritance and scaling of font size in all browsers. - * 2. Correct the odd `em` font sizing in all browsers. - */ - - code, - kbd, - samp { - font-family: monospace, monospace; /* 1 */ - font-size: 1em; /* 2 */ - } - - /** - * Add the correct font size in all browsers. - */ - - small { - font-size: 80%; - } - - /** - * Prevent `sub` and `sup` elements from affecting the line height in - * all browsers. - */ - - sub, - sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; - } - - sub { - bottom: -0.25em; - } - - sup { - top: -0.5em; - } - - /* Embedded content - ========================================================================== */ - - /** - * Remove the border on images inside links in IE 10. - */ - - img { - border-style: none; - } - - /* Forms - ========================================================================== */ - - /** - * 1. Change the font styles in all browsers. - * 2. Remove the margin in Firefox and Safari. - */ - - button, - input, - optgroup, - select, - textarea { - font-family: inherit; /* 1 */ - font-size: 100%; /* 1 */ - line-height: 1.15; /* 1 */ - margin: 0; /* 2 */ - } - - /** - * Show the overflow in IE. - * 1. Show the overflow in Edge. - */ - - button, - input { /* 1 */ - overflow: visible; - } - - /** - * Remove the inheritance of text transform in Edge, Firefox, and IE. - * 1. Remove the inheritance of text transform in Firefox. - */ - - button, - select { /* 1 */ - text-transform: none; - } - - /** - * Correct the inability to style clickable types in iOS and Safari. - */ - - button, - [type="button"], - [type="reset"], - [type="submit"] { - -webkit-appearance: button; - } - - /** - * Remove the inner border and padding in Firefox. - */ - - button::-moz-focus-inner, - [type="button"]::-moz-focus-inner, - [type="reset"]::-moz-focus-inner, - [type="submit"]::-moz-focus-inner { - border-style: none; - padding: 0; - } - - /** - * Restore the focus styles unset by the previous rule. - */ - - button:-moz-focusring, - [type="button"]:-moz-focusring, - [type="reset"]:-moz-focusring, - [type="submit"]:-moz-focusring { - outline: 1px dotted ButtonText; - } - - /** - * Correct the padding in Firefox. - */ - - fieldset { - padding: 0.35em 0.75em 0.625em; - } - - /** - * 1. Correct the text wrapping in Edge and IE. - * 2. Correct the color inheritance from `fieldset` elements in IE. - * 3. Remove the padding so developers are not caught out when they zero out - * `fieldset` elements in all browsers. 
- */ - - legend { - box-sizing: border-box; /* 1 */ - color: inherit; /* 2 */ - display: table; /* 1 */ - max-width: 100%; /* 1 */ - padding: 0; /* 3 */ - white-space: normal; /* 1 */ - } - - /** - * Add the correct vertical alignment in Chrome, Firefox, and Opera. - */ - - progress { - vertical-align: baseline; - } - - /** - * Remove the default vertical scrollbar in IE 10+. - */ - - textarea { - overflow: auto; - } - - /** - * 1. Add the correct box sizing in IE 10. - * 2. Remove the padding in IE 10. - */ - - [type="checkbox"], - [type="radio"] { - box-sizing: border-box; /* 1 */ - padding: 0; /* 2 */ - } - - /** - * Correct the cursor style of increment and decrement buttons in Chrome. - */ - - [type="number"]::-webkit-inner-spin-button, - [type="number"]::-webkit-outer-spin-button { - height: auto; - } - - /** - * 1. Correct the odd appearance in Chrome and Safari. - * 2. Correct the outline style in Safari. - */ - - [type="search"] { - -webkit-appearance: textfield; /* 1 */ - outline-offset: -2px; /* 2 */ - } - - /** - * Remove the inner padding in Chrome and Safari on macOS. - */ - - [type="search"]::-webkit-search-decoration { - -webkit-appearance: none; - } - - /** - * 1. Correct the inability to style clickable types in iOS and Safari. - * 2. Change font properties to `inherit` in Safari. - */ - - ::-webkit-file-upload-button { - -webkit-appearance: button; /* 1 */ - font: inherit; /* 2 */ - } - - /* Interactive - ========================================================================== */ - - /* - * Add the correct display in Edge, IE 10+, and Firefox. - */ - - details { - display: block; - } - - /* - * Add the correct display in all browsers. - */ - - summary { - display: list-item; - } - - /* Misc - ========================================================================== */ - - /** - * Add the correct display in IE 10+. - */ - - template { - display: none; - } - - /** - * Add the correct display in IE 10. 
- */ - - [hidden] { - display: none; - } diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_animated.scss b/themes/hugo-coder/assets/scss/fork-awesome/_animated.scss deleted file mode 100644 index 543d5b33..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_animated.scss +++ /dev/null @@ -1,34 +0,0 @@ -// Spinning Icons -// -------------------------- - -.#{$fa-css-prefix}-spin { - -webkit-animation: #{$fa-css-prefix}-spin 2s infinite linear; - animation: #{$fa-css-prefix}-spin 2s infinite linear; -} - -.#{$fa-css-prefix}-pulse { - -webkit-animation: #{$fa-css-prefix}-spin 1s infinite steps(8); - animation: #{$fa-css-prefix}-spin 1s infinite steps(8); -} - -@-webkit-keyframes #{$fa-css-prefix}-spin { - 0% { - -webkit-transform: rotate(0deg); - transform: rotate(0deg); - } - 100% { - -webkit-transform: rotate(359deg); - transform: rotate(359deg); - } -} - -@keyframes #{$fa-css-prefix}-spin { - 0% { - -webkit-transform: rotate(0deg); - transform: rotate(0deg); - } - 100% { - -webkit-transform: rotate(359deg); - transform: rotate(359deg); - } -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_bordered-pulled.scss b/themes/hugo-coder/assets/scss/fork-awesome/_bordered-pulled.scss deleted file mode 100644 index d4b85a02..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_bordered-pulled.scss +++ /dev/null @@ -1,25 +0,0 @@ -// Bordered & Pulled -// ------------------------- - -.#{$fa-css-prefix}-border { - padding: .2em .25em .15em; - border: solid .08em $fa-border-color; - border-radius: .1em; -} - -.#{$fa-css-prefix}-pull-left { float: left; } -.#{$fa-css-prefix}-pull-right { float: right; } - -.#{$fa-css-prefix} { - &.#{$fa-css-prefix}-pull-left { margin-right: .3em; } - &.#{$fa-css-prefix}-pull-right { margin-left: .3em; } -} - -/* Deprecated as of 4.4.0 */ -.pull-right { float: right; } -.pull-left { float: left; } - -.#{$fa-css-prefix} { - &.pull-left { margin-right: .3em; } - &.pull-right { margin-left: .3em; } -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_core.scss b/themes/hugo-coder/assets/scss/fork-awesome/_core.scss deleted file mode 100644 index e5a87b59..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_core.scss +++ /dev/null @@ -1,12 +0,0 @@ -// Base Class Definition -// ------------------------- - -.#{$fa-css-prefix} { - display: inline-block; - font: normal normal normal #{$fa-font-size-base}/#{$fa-line-height-base} #{$fa-font-family}; // shortening font declaration - font-size: inherit; // can't have font-size inherit on line above, so need to override - text-rendering: auto; // optimizelegibility throws things off #1094 - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; - -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_fixed-width.scss b/themes/hugo-coder/assets/scss/fork-awesome/_fixed-width.scss deleted file mode 100644 index b221c981..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_fixed-width.scss +++ /dev/null @@ -1,6 +0,0 @@ -// Fixed Width Icons -// ------------------------- -.#{$fa-css-prefix}-fw { - width: (18em / 14); - text-align: center; -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_functions.scss b/themes/hugo-coder/assets/scss/fork-awesome/_functions.scss deleted file mode 100644 index 7ef2336c..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_functions.scss +++ /dev/null @@ -1,11 +0,0 @@ -// Functions -// -------------------------- - -// Helper function which adds quotes to preserve unicode values in CSS output. 
-// -// See: https://github.com/sass/sass/issues/1395 -// See: https://stackoverflow.com/questions/30421570/sass-unicode-escape-is-not-preserved-in-css-file - -@function fa-content($fa-var) { - @return unquote("\"#{$fa-var}\""); -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_icons.scss b/themes/hugo-coder/assets/scss/fork-awesome/_icons.scss deleted file mode 100644 index 4ac7aee1..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_icons.scss +++ /dev/null @@ -1,934 +0,0 @@ -/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen - readers do not read off random characters that represent icons */ - -.#{$fa-css-prefix}-glass:before { content: fa-content($fa-var-glass); } -.#{$fa-css-prefix}-music:before { content: fa-content($fa-var-music); } -.#{$fa-css-prefix}-search:before { content: fa-content($fa-var-search); } -.#{$fa-css-prefix}-envelope-o:before { content: fa-content($fa-var-envelope-o); } -.#{$fa-css-prefix}-heart:before { content: fa-content($fa-var-heart); } -.#{$fa-css-prefix}-star:before { content: fa-content($fa-var-star); } -.#{$fa-css-prefix}-star-o:before { content: fa-content($fa-var-star-o); } -.#{$fa-css-prefix}-user:before { content: fa-content($fa-var-user); } -.#{$fa-css-prefix}-film:before { content: fa-content($fa-var-film); } -.#{$fa-css-prefix}-th-large:before { content: fa-content($fa-var-th-large); } -.#{$fa-css-prefix}-th:before { content: fa-content($fa-var-th); } -.#{$fa-css-prefix}-th-list:before { content: fa-content($fa-var-th-list); } -.#{$fa-css-prefix}-check:before { content: fa-content($fa-var-check); } -.#{$fa-css-prefix}-remove:before, -.#{$fa-css-prefix}-close:before, -.#{$fa-css-prefix}-times:before { content: fa-content($fa-var-times); } -.#{$fa-css-prefix}-search-plus:before { content: fa-content($fa-var-search-plus); } -.#{$fa-css-prefix}-search-minus:before { content: fa-content($fa-var-search-minus); } -.#{$fa-css-prefix}-power-off:before { content: fa-content($fa-var-power-off); } -.#{$fa-css-prefix}-signal:before { content: fa-content($fa-var-signal); } -.#{$fa-css-prefix}-gear:before, -.#{$fa-css-prefix}-cog:before { content: fa-content($fa-var-cog); } -.#{$fa-css-prefix}-trash-o:before { content: fa-content($fa-var-trash-o); } -.#{$fa-css-prefix}-home:before { content: fa-content($fa-var-home); } -.#{$fa-css-prefix}-file-o:before { content: fa-content($fa-var-file-o); } -.#{$fa-css-prefix}-clock-o:before { content: fa-content($fa-var-clock-o); } -.#{$fa-css-prefix}-road:before { content: fa-content($fa-var-road); } -.#{$fa-css-prefix}-download:before { content: fa-content($fa-var-download); } -.#{$fa-css-prefix}-arrow-circle-o-down:before { content: fa-content($fa-var-arrow-circle-o-down); } -.#{$fa-css-prefix}-arrow-circle-o-up:before { content: fa-content($fa-var-arrow-circle-o-up); } -.#{$fa-css-prefix}-inbox:before { content: fa-content($fa-var-inbox); } -.#{$fa-css-prefix}-play-circle-o:before { content: fa-content($fa-var-play-circle-o); } -.#{$fa-css-prefix}-rotate-right:before, -.#{$fa-css-prefix}-repeat:before { content: fa-content($fa-var-repeat); } -.#{$fa-css-prefix}-sync:before, -.#{$fa-css-prefix}-refresh:before { content: fa-content($fa-var-refresh); } -.#{$fa-css-prefix}-list-alt:before { content: fa-content($fa-var-list-alt); } -.#{$fa-css-prefix}-lock:before { content: fa-content($fa-var-lock); } -.#{$fa-css-prefix}-flag:before { content: fa-content($fa-var-flag); } -.#{$fa-css-prefix}-headphones:before { content: fa-content($fa-var-headphones); } -.#{$fa-css-prefix}-volume-off:before 
{ content: fa-content($fa-var-volume-off); } -.#{$fa-css-prefix}-volume-down:before { content: fa-content($fa-var-volume-down); } -.#{$fa-css-prefix}-volume-up:before { content: fa-content($fa-var-volume-up); } -.#{$fa-css-prefix}-qrcode:before { content: fa-content($fa-var-qrcode); } -.#{$fa-css-prefix}-barcode:before { content: fa-content($fa-var-barcode); } -.#{$fa-css-prefix}-tag:before { content: fa-content($fa-var-tag); } -.#{$fa-css-prefix}-tags:before { content: fa-content($fa-var-tags); } -.#{$fa-css-prefix}-book:before { content: fa-content($fa-var-book); } -.#{$fa-css-prefix}-bookmark:before { content: fa-content($fa-var-bookmark); } -.#{$fa-css-prefix}-print:before { content: fa-content($fa-var-print); } -.#{$fa-css-prefix}-camera:before { content: fa-content($fa-var-camera); } -.#{$fa-css-prefix}-font:before { content: fa-content($fa-var-font); } -.#{$fa-css-prefix}-bold:before { content: fa-content($fa-var-bold); } -.#{$fa-css-prefix}-italic:before { content: fa-content($fa-var-italic); } -.#{$fa-css-prefix}-text-height:before { content: fa-content($fa-var-text-height); } -.#{$fa-css-prefix}-text-width:before { content: fa-content($fa-var-text-width); } -.#{$fa-css-prefix}-align-left:before { content: fa-content($fa-var-align-left); } -.#{$fa-css-prefix}-align-center:before { content: fa-content($fa-var-align-center); } -.#{$fa-css-prefix}-align-right:before { content: fa-content($fa-var-align-right); } -.#{$fa-css-prefix}-align-justify:before { content: fa-content($fa-var-align-justify); } -.#{$fa-css-prefix}-list:before { content: fa-content($fa-var-list); } -.#{$fa-css-prefix}-dedent:before, -.#{$fa-css-prefix}-outdent:before { content: fa-content($fa-var-outdent); } -.#{$fa-css-prefix}-indent:before { content: fa-content($fa-var-indent); } -.#{$fa-css-prefix}-video:before, -.#{$fa-css-prefix}-video-camera:before { content: fa-content($fa-var-video-camera); } -.#{$fa-css-prefix}-photo:before, -.#{$fa-css-prefix}-image:before, -.#{$fa-css-prefix}-picture-o:before { content: fa-content($fa-var-picture-o); } -.#{$fa-css-prefix}-pencil:before { content: fa-content($fa-var-pencil); } -.#{$fa-css-prefix}-map-marker:before { content: fa-content($fa-var-map-marker); } -.#{$fa-css-prefix}-adjust:before { content: fa-content($fa-var-adjust); } -.#{$fa-css-prefix}-tint:before { content: fa-content($fa-var-tint); } -.#{$fa-css-prefix}-edit:before, -.#{$fa-css-prefix}-pencil-square-o:before { content: fa-content($fa-var-pencil-square-o); } -.#{$fa-css-prefix}-share-square-o:before { content: fa-content($fa-var-share-square-o); } -.#{$fa-css-prefix}-check-square-o:before { content: fa-content($fa-var-check-square-o); } -.#{$fa-css-prefix}-arrows:before { content: fa-content($fa-var-arrows); } -.#{$fa-css-prefix}-step-backward:before { content: fa-content($fa-var-step-backward); } -.#{$fa-css-prefix}-fast-backward:before { content: fa-content($fa-var-fast-backward); } -.#{$fa-css-prefix}-backward:before { content: fa-content($fa-var-backward); } -.#{$fa-css-prefix}-play:before { content: fa-content($fa-var-play); } -.#{$fa-css-prefix}-pause:before { content: fa-content($fa-var-pause); } -.#{$fa-css-prefix}-stop:before { content: fa-content($fa-var-stop); } -.#{$fa-css-prefix}-forward:before { content: fa-content($fa-var-forward); } -.#{$fa-css-prefix}-fast-forward:before { content: fa-content($fa-var-fast-forward); } -.#{$fa-css-prefix}-step-forward:before { content: fa-content($fa-var-step-forward); } -.#{$fa-css-prefix}-eject:before { content: fa-content($fa-var-eject); } 
-.#{$fa-css-prefix}-chevron-left:before { content: fa-content($fa-var-chevron-left); } -.#{$fa-css-prefix}-chevron-right:before { content: fa-content($fa-var-chevron-right); } -.#{$fa-css-prefix}-plus-circle:before { content: fa-content($fa-var-plus-circle); } -.#{$fa-css-prefix}-minus-circle:before { content: fa-content($fa-var-minus-circle); } -.#{$fa-css-prefix}-times-circle:before { content: fa-content($fa-var-times-circle); } -.#{$fa-css-prefix}-check-circle:before { content: fa-content($fa-var-check-circle); } -.#{$fa-css-prefix}-question-circle:before { content: fa-content($fa-var-question-circle); } -.#{$fa-css-prefix}-info-circle:before { content: fa-content($fa-var-info-circle); } -.#{$fa-css-prefix}-crosshairs:before { content: fa-content($fa-var-crosshairs); } -.#{$fa-css-prefix}-times-circle-o:before { content: fa-content($fa-var-times-circle-o); } -.#{$fa-css-prefix}-check-circle-o:before { content: fa-content($fa-var-check-circle-o); } -.#{$fa-css-prefix}-ban:before { content: fa-content($fa-var-ban); } -.#{$fa-css-prefix}-arrow-left:before { content: fa-content($fa-var-arrow-left); } -.#{$fa-css-prefix}-arrow-right:before { content: fa-content($fa-var-arrow-right); } -.#{$fa-css-prefix}-arrow-up:before { content: fa-content($fa-var-arrow-up); } -.#{$fa-css-prefix}-arrow-down:before { content: fa-content($fa-var-arrow-down); } -.#{$fa-css-prefix}-mail-forward:before, -.#{$fa-css-prefix}-share:before { content: fa-content($fa-var-share); } -.#{$fa-css-prefix}-expand:before { content: fa-content($fa-var-expand); } -.#{$fa-css-prefix}-compress:before { content: fa-content($fa-var-compress); } -.#{$fa-css-prefix}-plus:before { content: fa-content($fa-var-plus); } -.#{$fa-css-prefix}-minus:before { content: fa-content($fa-var-minus); } -.#{$fa-css-prefix}-asterisk:before { content: fa-content($fa-var-asterisk); } -.#{$fa-css-prefix}-exclamation-circle:before { content: fa-content($fa-var-exclamation-circle); } -.#{$fa-css-prefix}-gift:before { content: fa-content($fa-var-gift); } -.#{$fa-css-prefix}-leaf:before { content: fa-content($fa-var-leaf); } -.#{$fa-css-prefix}-fire:before { content: fa-content($fa-var-fire); } -.#{$fa-css-prefix}-eye:before { content: fa-content($fa-var-eye); } -.#{$fa-css-prefix}-eye-slash:before { content: fa-content($fa-var-eye-slash); } -.#{$fa-css-prefix}-warning:before, -.#{$fa-css-prefix}-exclamation-triangle:before { content: fa-content($fa-var-exclamation-triangle); } -.#{$fa-css-prefix}-plane:before { content: fa-content($fa-var-plane); } -.#{$fa-css-prefix}-calendar:before { content: fa-content($fa-var-calendar); } -.#{$fa-css-prefix}-random:before { content: fa-content($fa-var-random); } -.#{$fa-css-prefix}-comment:before { content: fa-content($fa-var-comment); } -.#{$fa-css-prefix}-magnet:before { content: fa-content($fa-var-magnet); } -.#{$fa-css-prefix}-chevron-up:before { content: fa-content($fa-var-chevron-up); } -.#{$fa-css-prefix}-chevron-down:before { content: fa-content($fa-var-chevron-down); } -.#{$fa-css-prefix}-retweet:before { content: fa-content($fa-var-retweet); } -.#{$fa-css-prefix}-shopping-cart:before { content: fa-content($fa-var-shopping-cart); } -.#{$fa-css-prefix}-folder:before { content: fa-content($fa-var-folder); } -.#{$fa-css-prefix}-folder-open:before { content: fa-content($fa-var-folder-open); } -.#{$fa-css-prefix}-arrows-v:before { content: fa-content($fa-var-arrows-v); } -.#{$fa-css-prefix}-arrows-h:before { content: fa-content($fa-var-arrows-h); } -.#{$fa-css-prefix}-bar-chart-o:before, 
-.#{$fa-css-prefix}-bar-chart:before { content: fa-content($fa-var-bar-chart); } -.#{$fa-css-prefix}-twitter-square:before { content: fa-content($fa-var-twitter-square); } -.#{$fa-css-prefix}-facebook-square:before { content: fa-content($fa-var-facebook-square); } -.#{$fa-css-prefix}-camera-retro:before { content: fa-content($fa-var-camera-retro); } -.#{$fa-css-prefix}-key:before { content: fa-content($fa-var-key); } -.#{$fa-css-prefix}-gears:before, -.#{$fa-css-prefix}-cogs:before { content: fa-content($fa-var-cogs); } -.#{$fa-css-prefix}-comments:before { content: fa-content($fa-var-comments); } -.#{$fa-css-prefix}-thumbs-o-up:before { content: fa-content($fa-var-thumbs-o-up); } -.#{$fa-css-prefix}-thumbs-o-down:before { content: fa-content($fa-var-thumbs-o-down); } -.#{$fa-css-prefix}-star-half:before { content: fa-content($fa-var-star-half); } -.#{$fa-css-prefix}-heart-o:before { content: fa-content($fa-var-heart-o); } -.#{$fa-css-prefix}-sign-out:before { content: fa-content($fa-var-sign-out); } -.#{$fa-css-prefix}-linkedin-square:before { content: fa-content($fa-var-linkedin-square); } -.#{$fa-css-prefix}-thumb-tack:before { content: fa-content($fa-var-thumb-tack); } -.#{$fa-css-prefix}-external-link:before { content: fa-content($fa-var-external-link); } -.#{$fa-css-prefix}-sign-in:before { content: fa-content($fa-var-sign-in); } -.#{$fa-css-prefix}-trophy:before { content: fa-content($fa-var-trophy); } -.#{$fa-css-prefix}-github-square:before { content: fa-content($fa-var-github-square); } -.#{$fa-css-prefix}-upload:before { content: fa-content($fa-var-upload); } -.#{$fa-css-prefix}-lemon-o:before { content: fa-content($fa-var-lemon-o); } -.#{$fa-css-prefix}-phone:before { content: fa-content($fa-var-phone); } -.#{$fa-css-prefix}-square-o:before { content: fa-content($fa-var-square-o); } -.#{$fa-css-prefix}-bookmark-o:before { content: fa-content($fa-var-bookmark-o); } -.#{$fa-css-prefix}-phone-square:before { content: fa-content($fa-var-phone-square); } -.#{$fa-css-prefix}-twitter:before { content: fa-content($fa-var-twitter); } -.#{$fa-css-prefix}-facebook-f:before, -.#{$fa-css-prefix}-facebook:before { content: fa-content($fa-var-facebook); } -.#{$fa-css-prefix}-github:before { content: fa-content($fa-var-github); } -.#{$fa-css-prefix}-unlock:before { content: fa-content($fa-var-unlock); } -.#{$fa-css-prefix}-credit-card:before { content: fa-content($fa-var-credit-card); } -.#{$fa-css-prefix}-feed:before, -.#{$fa-css-prefix}-rss:before { content: fa-content($fa-var-rss); } -.#{$fa-css-prefix}-hdd-o:before { content: fa-content($fa-var-hdd-o); } -.#{$fa-css-prefix}-bullhorn:before { content: fa-content($fa-var-bullhorn); } -.#{$fa-css-prefix}-bell-o:before { content: fa-content($fa-var-bell-o); } -.#{$fa-css-prefix}-certificate:before { content: fa-content($fa-var-certificate); } -.#{$fa-css-prefix}-hand-o-right:before { content: fa-content($fa-var-hand-o-right); } -.#{$fa-css-prefix}-hand-o-left:before { content: fa-content($fa-var-hand-o-left); } -.#{$fa-css-prefix}-hand-o-up:before { content: fa-content($fa-var-hand-o-up); } -.#{$fa-css-prefix}-hand-o-down:before { content: fa-content($fa-var-hand-o-down); } -.#{$fa-css-prefix}-arrow-circle-left:before { content: fa-content($fa-var-arrow-circle-left); } -.#{$fa-css-prefix}-arrow-circle-right:before { content: fa-content($fa-var-arrow-circle-right); } -.#{$fa-css-prefix}-arrow-circle-up:before { content: fa-content($fa-var-arrow-circle-up); } -.#{$fa-css-prefix}-arrow-circle-down:before { content: 
fa-content($fa-var-arrow-circle-down); } -.#{$fa-css-prefix}-globe:before { content: fa-content($fa-var-globe); } -.#{$fa-css-prefix}-globe-e:before { content: fa-content($fa-var-globe-e); } -.#{$fa-css-prefix}-globe-w:before { content: fa-content($fa-var-globe-w); } -.#{$fa-css-prefix}-wrench:before { content: fa-content($fa-var-wrench); } -.#{$fa-css-prefix}-tasks:before { content: fa-content($fa-var-tasks); } -.#{$fa-css-prefix}-filter:before { content: fa-content($fa-var-filter); } -.#{$fa-css-prefix}-briefcase:before { content: fa-content($fa-var-briefcase); } -.#{$fa-css-prefix}-arrows-alt:before { content: fa-content($fa-var-arrows-alt); } -.#{$fa-css-prefix}-community:before, -.#{$fa-css-prefix}-group:before, -.#{$fa-css-prefix}-users:before { content: fa-content($fa-var-users); } -.#{$fa-css-prefix}-chain:before, -.#{$fa-css-prefix}-link:before { content: fa-content($fa-var-link); } -.#{$fa-css-prefix}-cloud:before { content: fa-content($fa-var-cloud); } -.#{$fa-css-prefix}-flask:before { content: fa-content($fa-var-flask); } -.#{$fa-css-prefix}-cut:before, -.#{$fa-css-prefix}-scissors:before { content: fa-content($fa-var-scissors); } -.#{$fa-css-prefix}-copy:before, -.#{$fa-css-prefix}-files-o:before { content: fa-content($fa-var-files-o); } -.#{$fa-css-prefix}-paperclip:before { content: fa-content($fa-var-paperclip); } -.#{$fa-css-prefix}-save:before, -.#{$fa-css-prefix}-floppy-o:before { content: fa-content($fa-var-floppy-o); } -.#{$fa-css-prefix}-square:before { content: fa-content($fa-var-square); } -.#{$fa-css-prefix}-navicon:before, -.#{$fa-css-prefix}-reorder:before, -.#{$fa-css-prefix}-bars:before { content: fa-content($fa-var-bars); } -.#{$fa-css-prefix}-list-ul:before { content: fa-content($fa-var-list-ul); } -.#{$fa-css-prefix}-list-ol:before { content: fa-content($fa-var-list-ol); } -.#{$fa-css-prefix}-strikethrough:before { content: fa-content($fa-var-strikethrough); } -.#{$fa-css-prefix}-underline:before { content: fa-content($fa-var-underline); } -.#{$fa-css-prefix}-table:before { content: fa-content($fa-var-table); } -.#{$fa-css-prefix}-magic:before { content: fa-content($fa-var-magic); } -.#{$fa-css-prefix}-truck:before { content: fa-content($fa-var-truck); } -.#{$fa-css-prefix}-pinterest:before { content: fa-content($fa-var-pinterest); } -.#{$fa-css-prefix}-pinterest-square:before { content: fa-content($fa-var-pinterest-square); } -.#{$fa-css-prefix}-google-plus-square:before { content: fa-content($fa-var-google-plus-square); } -.#{$fa-css-prefix}-google-plus-g:before, -.#{$fa-css-prefix}-google-plus:before { content: fa-content($fa-var-google-plus); } -.#{$fa-css-prefix}-money:before { content: fa-content($fa-var-money); } -.#{$fa-css-prefix}-caret-down:before { content: fa-content($fa-var-caret-down); } -.#{$fa-css-prefix}-caret-up:before { content: fa-content($fa-var-caret-up); } -.#{$fa-css-prefix}-caret-left:before { content: fa-content($fa-var-caret-left); } -.#{$fa-css-prefix}-caret-right:before { content: fa-content($fa-var-caret-right); } -.#{$fa-css-prefix}-columns:before { content: fa-content($fa-var-columns); } -.#{$fa-css-prefix}-unsorted:before, -.#{$fa-css-prefix}-sort:before { content: fa-content($fa-var-sort); } -.#{$fa-css-prefix}-sort-down:before, -.#{$fa-css-prefix}-sort-desc:before { content: fa-content($fa-var-sort-desc); } -.#{$fa-css-prefix}-sort-up:before, -.#{$fa-css-prefix}-sort-asc:before { content: fa-content($fa-var-sort-asc); } -.#{$fa-css-prefix}-envelope:before { content: fa-content($fa-var-envelope); } 
-.#{$fa-css-prefix}-linkedin:before { content: fa-content($fa-var-linkedin); } -.#{$fa-css-prefix}-rotate-left:before, -.#{$fa-css-prefix}-undo:before { content: fa-content($fa-var-undo); } -.#{$fa-css-prefix}-legal:before, -.#{$fa-css-prefix}-gavel:before { content: fa-content($fa-var-gavel); } -.#{$fa-css-prefix}-dashboard:before, -.#{$fa-css-prefix}-tachometer:before { content: fa-content($fa-var-tachometer); } -.#{$fa-css-prefix}-comment-o:before { content: fa-content($fa-var-comment-o); } -.#{$fa-css-prefix}-comments-o:before { content: fa-content($fa-var-comments-o); } -.#{$fa-css-prefix}-flash:before, -.#{$fa-css-prefix}-bolt:before { content: fa-content($fa-var-bolt); } -.#{$fa-css-prefix}-sitemap:before { content: fa-content($fa-var-sitemap); } -.#{$fa-css-prefix}-umbrella:before { content: fa-content($fa-var-umbrella); } -.#{$fa-css-prefix}-paste:before, -.#{$fa-css-prefix}-clipboard:before { content: fa-content($fa-var-clipboard); } -.#{$fa-css-prefix}-lightbulb-o:before { content: fa-content($fa-var-lightbulb-o); } -.#{$fa-css-prefix}-exchange:before { content: fa-content($fa-var-exchange); } -.#{$fa-css-prefix}-cloud-download:before { content: fa-content($fa-var-cloud-download); } -.#{$fa-css-prefix}-cloud-upload:before { content: fa-content($fa-var-cloud-upload); } -.#{$fa-css-prefix}-user-md:before { content: fa-content($fa-var-user-md); } -.#{$fa-css-prefix}-stethoscope:before { content: fa-content($fa-var-stethoscope); } -.#{$fa-css-prefix}-suitcase:before { content: fa-content($fa-var-suitcase); } -.#{$fa-css-prefix}-bell:before { content: fa-content($fa-var-bell); } -.#{$fa-css-prefix}-coffee:before { content: fa-content($fa-var-coffee); } -.#{$fa-css-prefix}-utensils:before, -.#{$fa-css-prefix}-cutlery:before { content: fa-content($fa-var-cutlery); } -.#{$fa-css-prefix}-file-text-o:before { content: fa-content($fa-var-file-text-o); } -.#{$fa-css-prefix}-building-o:before { content: fa-content($fa-var-building-o); } -.#{$fa-css-prefix}-hospital-o:before { content: fa-content($fa-var-hospital-o); } -.#{$fa-css-prefix}-ambulance:before { content: fa-content($fa-var-ambulance); } -.#{$fa-css-prefix}-medkit:before { content: fa-content($fa-var-medkit); } -.#{$fa-css-prefix}-fighter-jet:before { content: fa-content($fa-var-fighter-jet); } -.#{$fa-css-prefix}-beer:before { content: fa-content($fa-var-beer); } -.#{$fa-css-prefix}-h-square:before { content: fa-content($fa-var-h-square); } -.#{$fa-css-prefix}-plus-square:before { content: fa-content($fa-var-plus-square); } -.#{$fa-css-prefix}-angle-double-left:before { content: fa-content($fa-var-angle-double-left); } -.#{$fa-css-prefix}-angle-double-right:before { content: fa-content($fa-var-angle-double-right); } -.#{$fa-css-prefix}-angle-double-up:before { content: fa-content($fa-var-angle-double-up); } -.#{$fa-css-prefix}-angle-double-down:before { content: fa-content($fa-var-angle-double-down); } -.#{$fa-css-prefix}-angle-left:before { content: fa-content($fa-var-angle-left); } -.#{$fa-css-prefix}-angle-right:before { content: fa-content($fa-var-angle-right); } -.#{$fa-css-prefix}-angle-up:before { content: fa-content($fa-var-angle-up); } -.#{$fa-css-prefix}-angle-down:before { content: fa-content($fa-var-angle-down); } -.#{$fa-css-prefix}-desktop:before { content: fa-content($fa-var-desktop); } -.#{$fa-css-prefix}-laptop:before { content: fa-content($fa-var-laptop); } -.#{$fa-css-prefix}-tablet:before { content: fa-content($fa-var-tablet); } -.#{$fa-css-prefix}-mobile-phone:before, -.#{$fa-css-prefix}-mobile:before { 
content: fa-content($fa-var-mobile); } -.#{$fa-css-prefix}-circle-o:before { content: fa-content($fa-var-circle-o); } -.#{$fa-css-prefix}-quote-left:before { content: fa-content($fa-var-quote-left); } -.#{$fa-css-prefix}-quote-right:before { content: fa-content($fa-var-quote-right); } -.#{$fa-css-prefix}-spinner:before { content: fa-content($fa-var-spinner); } -.#{$fa-css-prefix}-circle:before { content: fa-content($fa-var-circle); } -.#{$fa-css-prefix}-mail-reply:before, -.#{$fa-css-prefix}-reply:before { content: fa-content($fa-var-reply); } -.#{$fa-css-prefix}-github-alt:before { content: fa-content($fa-var-github-alt); } -.#{$fa-css-prefix}-folder-o:before { content: fa-content($fa-var-folder-o); } -.#{$fa-css-prefix}-folder-open-o:before { content: fa-content($fa-var-folder-open-o); } -.#{$fa-css-prefix}-smile-o:before { content: fa-content($fa-var-smile-o); } -.#{$fa-css-prefix}-frown-o:before { content: fa-content($fa-var-frown-o); } -.#{$fa-css-prefix}-meh-o:before { content: fa-content($fa-var-meh-o); } -.#{$fa-css-prefix}-gamepad:before { content: fa-content($fa-var-gamepad); } -.#{$fa-css-prefix}-keyboard-o:before { content: fa-content($fa-var-keyboard-o); } -.#{$fa-css-prefix}-flag-o:before { content: fa-content($fa-var-flag-o); } -.#{$fa-css-prefix}-flag-checkered:before { content: fa-content($fa-var-flag-checkered); } -.#{$fa-css-prefix}-terminal:before { content: fa-content($fa-var-terminal); } -.#{$fa-css-prefix}-code:before { content: fa-content($fa-var-code); } -.#{$fa-css-prefix}-mail-reply-all:before, -.#{$fa-css-prefix}-reply-all:before { content: fa-content($fa-var-reply-all); } -.#{$fa-css-prefix}-star-half-empty:before, -.#{$fa-css-prefix}-star-half-full:before, -.#{$fa-css-prefix}-star-half-o:before { content: fa-content($fa-var-star-half-o); } -.#{$fa-css-prefix}-location-arrow:before { content: fa-content($fa-var-location-arrow); } -.#{$fa-css-prefix}-crop:before { content: fa-content($fa-var-crop); } -.#{$fa-css-prefix}-code-fork:before { content: fa-content($fa-var-code-fork); } -.#{$fa-css-prefix}-unlink:before, -.#{$fa-css-prefix}-chain-broken:before { content: fa-content($fa-var-chain-broken); } -.#{$fa-css-prefix}-question:before { content: fa-content($fa-var-question); } -.#{$fa-css-prefix}-info:before { content: fa-content($fa-var-info); } -.#{$fa-css-prefix}-exclamation:before { content: fa-content($fa-var-exclamation); } -.#{$fa-css-prefix}-superscript:before { content: fa-content($fa-var-superscript); } -.#{$fa-css-prefix}-subscript:before { content: fa-content($fa-var-subscript); } -.#{$fa-css-prefix}-eraser:before { content: fa-content($fa-var-eraser); } -.#{$fa-css-prefix}-puzzle-piece:before { content: fa-content($fa-var-puzzle-piece); } -.#{$fa-css-prefix}-microphone:before { content: fa-content($fa-var-microphone); } -.#{$fa-css-prefix}-microphone-slash:before { content: fa-content($fa-var-microphone-slash); } -.#{$fa-css-prefix}-shield:before { content: fa-content($fa-var-shield); } -.#{$fa-css-prefix}-calendar-o:before { content: fa-content($fa-var-calendar-o); } -.#{$fa-css-prefix}-fire-extinguisher:before { content: fa-content($fa-var-fire-extinguisher); } -.#{$fa-css-prefix}-rocket:before { content: fa-content($fa-var-rocket); } -.#{$fa-css-prefix}-maxcdn:before { content: fa-content($fa-var-maxcdn); } -.#{$fa-css-prefix}-chevron-circle-left:before { content: fa-content($fa-var-chevron-circle-left); } -.#{$fa-css-prefix}-chevron-circle-right:before { content: fa-content($fa-var-chevron-circle-right); } 
-.#{$fa-css-prefix}-chevron-circle-up:before { content: fa-content($fa-var-chevron-circle-up); } -.#{$fa-css-prefix}-chevron-circle-down:before { content: fa-content($fa-var-chevron-circle-down); } -.#{$fa-css-prefix}-html5:before { content: fa-content($fa-var-html5); } -.#{$fa-css-prefix}-css3:before { content: fa-content($fa-var-css3); } -.#{$fa-css-prefix}-anchor:before { content: fa-content($fa-var-anchor); } -.#{$fa-css-prefix}-unlock-alt:before { content: fa-content($fa-var-unlock-alt); } -.#{$fa-css-prefix}-bullseye:before { content: fa-content($fa-var-bullseye); } -.#{$fa-css-prefix}-ellipsis-h:before { content: fa-content($fa-var-ellipsis-h); } -.#{$fa-css-prefix}-ellipsis-v:before { content: fa-content($fa-var-ellipsis-v); } -.#{$fa-css-prefix}-rss-square:before { content: fa-content($fa-var-rss-square); } -.#{$fa-css-prefix}-play-circle:before { content: fa-content($fa-var-play-circle); } -.#{$fa-css-prefix}-ticket:before { content: fa-content($fa-var-ticket); } -.#{$fa-css-prefix}-minus-square:before { content: fa-content($fa-var-minus-square); } -.#{$fa-css-prefix}-minus-square-o:before { content: fa-content($fa-var-minus-square-o); } -.#{$fa-css-prefix}-level-up:before { content: fa-content($fa-var-level-up); } -.#{$fa-css-prefix}-level-down:before { content: fa-content($fa-var-level-down); } -.#{$fa-css-prefix}-check-square:before { content: fa-content($fa-var-check-square); } -.#{$fa-css-prefix}-pencil-square:before { content: fa-content($fa-var-pencil-square); } -.#{$fa-css-prefix}-external-link-square:before { content: fa-content($fa-var-external-link-square); } -.#{$fa-css-prefix}-share-square:before { content: fa-content($fa-var-share-square); } -.#{$fa-css-prefix}-compass:before { content: fa-content($fa-var-compass); } -.#{$fa-css-prefix}-toggle-down:before, -.#{$fa-css-prefix}-caret-square-o-down:before { content: fa-content($fa-var-caret-square-o-down); } -.#{$fa-css-prefix}-toggle-up:before, -.#{$fa-css-prefix}-caret-square-o-up:before { content: fa-content($fa-var-caret-square-o-up); } -.#{$fa-css-prefix}-toggle-right:before, -.#{$fa-css-prefix}-caret-square-o-right:before { content: fa-content($fa-var-caret-square-o-right); } -.#{$fa-css-prefix}-euro:before, -.#{$fa-css-prefix}-eur:before { content: fa-content($fa-var-eur); } -.#{$fa-css-prefix}-pound:before, -.#{$fa-css-prefix}-gbp:before { content: fa-content($fa-var-gbp); } -.#{$fa-css-prefix}-dollar:before, -.#{$fa-css-prefix}-usd:before { content: fa-content($fa-var-usd); } -.#{$fa-css-prefix}-rupee:before, -.#{$fa-css-prefix}-inr:before { content: fa-content($fa-var-inr); } -.#{$fa-css-prefix}-cny:before, -.#{$fa-css-prefix}-rmb:before, -.#{$fa-css-prefix}-yen:before, -.#{$fa-css-prefix}-jpy:before { content: fa-content($fa-var-jpy); } -.#{$fa-css-prefix}-ruble:before, -.#{$fa-css-prefix}-rouble:before, -.#{$fa-css-prefix}-rub:before { content: fa-content($fa-var-rub); } -.#{$fa-css-prefix}-won:before, -.#{$fa-css-prefix}-krw:before { content: fa-content($fa-var-krw); } -.#{$fa-css-prefix}-bitcoin:before, -.#{$fa-css-prefix}-btc:before { content: fa-content($fa-var-btc); } -.#{$fa-css-prefix}-file:before { content: fa-content($fa-var-file); } -.#{$fa-css-prefix}-file-text:before { content: fa-content($fa-var-file-text); } -.#{$fa-css-prefix}-sort-alpha-down:before, -.#{$fa-css-prefix}-sort-alpha-asc:before { content: fa-content($fa-var-sort-alpha-asc); } -.#{$fa-css-prefix}-sort-alpha-up:before, -.#{$fa-css-prefix}-sort-alpha-desc:before { content: fa-content($fa-var-sort-alpha-desc); } 
-.#{$fa-css-prefix}-sort-amount-down:before, -.#{$fa-css-prefix}-sort-amount-asc:before { content: fa-content($fa-var-sort-amount-asc); } -.#{$fa-css-prefix}-sort-amount-up:before, -.#{$fa-css-prefix}-sort-amount-desc:before { content: fa-content($fa-var-sort-amount-desc); } -.#{$fa-css-prefix}-sort-numeric-down:before, -.#{$fa-css-prefix}-sort-numeric-asc:before { content: fa-content($fa-var-sort-numeric-asc); } -.#{$fa-css-prefix}-sort-numeric-up:before, -.#{$fa-css-prefix}-sort-numeric-desc:before { content: fa-content($fa-var-sort-numeric-desc); } -.#{$fa-css-prefix}-thumbs-up:before { content: fa-content($fa-var-thumbs-up); } -.#{$fa-css-prefix}-thumbs-down:before { content: fa-content($fa-var-thumbs-down); } -.#{$fa-css-prefix}-youtube-square:before { content: fa-content($fa-var-youtube-square); } -.#{$fa-css-prefix}-youtube:before { content: fa-content($fa-var-youtube); } -.#{$fa-css-prefix}-xing:before { content: fa-content($fa-var-xing); } -.#{$fa-css-prefix}-xing-square:before { content: fa-content($fa-var-xing-square); } -.#{$fa-css-prefix}-youtube-play:before { content: fa-content($fa-var-youtube-play); } -.#{$fa-css-prefix}-dropbox:before { content: fa-content($fa-var-dropbox); } -.#{$fa-css-prefix}-stack-overflow:before { content: fa-content($fa-var-stack-overflow); } -.#{$fa-css-prefix}-instagram:before { content: fa-content($fa-var-instagram); } -.#{$fa-css-prefix}-flickr:before { content: fa-content($fa-var-flickr); } -.#{$fa-css-prefix}-adn:before { content: fa-content($fa-var-adn); } -.#{$fa-css-prefix}-bitbucket:before { content: fa-content($fa-var-bitbucket); } -.#{$fa-css-prefix}-bitbucket-square:before { content: fa-content($fa-var-bitbucket-square); } -.#{$fa-css-prefix}-tumblr:before { content: fa-content($fa-var-tumblr); } -.#{$fa-css-prefix}-tumblr-square:before { content: fa-content($fa-var-tumblr-square); } -.#{$fa-css-prefix}-long-arrow-down:before { content: fa-content($fa-var-long-arrow-down); } -.#{$fa-css-prefix}-long-arrow-up:before { content: fa-content($fa-var-long-arrow-up); } -.#{$fa-css-prefix}-long-arrow-left:before { content: fa-content($fa-var-long-arrow-left); } -.#{$fa-css-prefix}-long-arrow-right:before { content: fa-content($fa-var-long-arrow-right); } -.#{$fa-css-prefix}-apple:before { content: fa-content($fa-var-apple); } -.#{$fa-css-prefix}-windows:before { content: fa-content($fa-var-windows); } -.#{$fa-css-prefix}-android:before { content: fa-content($fa-var-android); } -.#{$fa-css-prefix}-linux:before { content: fa-content($fa-var-linux); } -.#{$fa-css-prefix}-dribbble:before { content: fa-content($fa-var-dribbble); } -.#{$fa-css-prefix}-skype:before { content: fa-content($fa-var-skype); } -.#{$fa-css-prefix}-foursquare:before { content: fa-content($fa-var-foursquare); } -.#{$fa-css-prefix}-trello:before { content: fa-content($fa-var-trello); } -.#{$fa-css-prefix}-female:before { content: fa-content($fa-var-female); } -.#{$fa-css-prefix}-male:before { content: fa-content($fa-var-male); } -.#{$fa-css-prefix}-gittip:before, -.#{$fa-css-prefix}-gratipay:before { content: fa-content($fa-var-gratipay); } -.#{$fa-css-prefix}-sun-o:before { content: fa-content($fa-var-sun-o); } -.#{$fa-css-prefix}-moon-o:before { content: fa-content($fa-var-moon-o); } -.#{$fa-css-prefix}-archive:before { content: fa-content($fa-var-archive); } -.#{$fa-css-prefix}-bug:before { content: fa-content($fa-var-bug); } -.#{$fa-css-prefix}-vk:before { content: fa-content($fa-var-vk); } -.#{$fa-css-prefix}-weibo:before { content: fa-content($fa-var-weibo); } 
-.#{$fa-css-prefix}-renren:before { content: fa-content($fa-var-renren); } -.#{$fa-css-prefix}-pagelines:before { content: fa-content($fa-var-pagelines); } -.#{$fa-css-prefix}-stack-exchange:before { content: fa-content($fa-var-stack-exchange); } -.#{$fa-css-prefix}-arrow-circle-o-right:before { content: fa-content($fa-var-arrow-circle-o-right); } -.#{$fa-css-prefix}-arrow-circle-o-left:before { content: fa-content($fa-var-arrow-circle-o-left); } -.#{$fa-css-prefix}-toggle-left:before, -.#{$fa-css-prefix}-caret-square-o-left:before { content: fa-content($fa-var-caret-square-o-left); } -.#{$fa-css-prefix}-dot-circle-o:before { content: fa-content($fa-var-dot-circle-o); } -.#{$fa-css-prefix}-wheelchair:before { content: fa-content($fa-var-wheelchair); } -.#{$fa-css-prefix}-vimeo-square:before { content: fa-content($fa-var-vimeo-square); } -.#{$fa-css-prefix}-turkish-lira:before, -.#{$fa-css-prefix}-try:before { content: fa-content($fa-var-try); } -.#{$fa-css-prefix}-plus-square-o:before { content: fa-content($fa-var-plus-square-o); } -.#{$fa-css-prefix}-space-shuttle:before { content: fa-content($fa-var-space-shuttle); } -.#{$fa-css-prefix}-slack:before { content: fa-content($fa-var-slack); } -.#{$fa-css-prefix}-envelope-square:before { content: fa-content($fa-var-envelope-square); } -.#{$fa-css-prefix}-wordpress:before { content: fa-content($fa-var-wordpress); } -.#{$fa-css-prefix}-openid:before { content: fa-content($fa-var-openid); } -.#{$fa-css-prefix}-institution:before, -.#{$fa-css-prefix}-bank:before, -.#{$fa-css-prefix}-university:before { content: fa-content($fa-var-university); } -.#{$fa-css-prefix}-mortar-board:before, -.#{$fa-css-prefix}-graduation-cap:before { content: fa-content($fa-var-graduation-cap); } -.#{$fa-css-prefix}-yahoo:before { content: fa-content($fa-var-yahoo); } -.#{$fa-css-prefix}-google:before { content: fa-content($fa-var-google); } -.#{$fa-css-prefix}-reddit:before { content: fa-content($fa-var-reddit); } -.#{$fa-css-prefix}-reddit-square:before { content: fa-content($fa-var-reddit-square); } -.#{$fa-css-prefix}-stumbleupon-circle:before { content: fa-content($fa-var-stumbleupon-circle); } -.#{$fa-css-prefix}-stumbleupon:before { content: fa-content($fa-var-stumbleupon); } -.#{$fa-css-prefix}-delicious:before { content: fa-content($fa-var-delicious); } -.#{$fa-css-prefix}-digg:before { content: fa-content($fa-var-digg); } -.#{$fa-css-prefix}-drupal:before { content: fa-content($fa-var-drupal); } -.#{$fa-css-prefix}-joomla:before { content: fa-content($fa-var-joomla); } -.#{$fa-css-prefix}-language:before { content: fa-content($fa-var-language); } -.#{$fa-css-prefix}-fax:before { content: fa-content($fa-var-fax); } -.#{$fa-css-prefix}-building:before { content: fa-content($fa-var-building); } -.#{$fa-css-prefix}-child:before { content: fa-content($fa-var-child); } -.#{$fa-css-prefix}-paw:before { content: fa-content($fa-var-paw); } -.#{$fa-css-prefix}-utensil-spoon:before, -.#{$fa-css-prefix}-spoon:before { content: fa-content($fa-var-spoon); } -.#{$fa-css-prefix}-cube:before { content: fa-content($fa-var-cube); } -.#{$fa-css-prefix}-cubes:before { content: fa-content($fa-var-cubes); } -.#{$fa-css-prefix}-behance:before { content: fa-content($fa-var-behance); } -.#{$fa-css-prefix}-behance-square:before { content: fa-content($fa-var-behance-square); } -.#{$fa-css-prefix}-steam:before { content: fa-content($fa-var-steam); } -.#{$fa-css-prefix}-steam-square:before { content: fa-content($fa-var-steam-square); } -.#{$fa-css-prefix}-recycle:before { content: 
fa-content($fa-var-recycle); } -.#{$fa-css-prefix}-automobile:before, -.#{$fa-css-prefix}-car:before { content: fa-content($fa-var-car); } -.#{$fa-css-prefix}-cab:before, -.#{$fa-css-prefix}-taxi:before { content: fa-content($fa-var-taxi); } -.#{$fa-css-prefix}-tree:before { content: fa-content($fa-var-tree); } -.#{$fa-css-prefix}-spotify:before { content: fa-content($fa-var-spotify); } -.#{$fa-css-prefix}-deviantart:before { content: fa-content($fa-var-deviantart); } -.#{$fa-css-prefix}-soundcloud:before { content: fa-content($fa-var-soundcloud); } -.#{$fa-css-prefix}-database:before { content: fa-content($fa-var-database); } -.#{$fa-css-prefix}-file-pdf-o:before { content: fa-content($fa-var-file-pdf-o); } -.#{$fa-css-prefix}-file-word-o:before { content: fa-content($fa-var-file-word-o); } -.#{$fa-css-prefix}-file-excel-o:before { content: fa-content($fa-var-file-excel-o); } -.#{$fa-css-prefix}-file-powerpoint-o:before { content: fa-content($fa-var-file-powerpoint-o); } -.#{$fa-css-prefix}-file-photo-o:before, -.#{$fa-css-prefix}-file-picture-o:before, -.#{$fa-css-prefix}-file-image-o:before { content: fa-content($fa-var-file-image-o); } -.#{$fa-css-prefix}-file-zip-o:before, -.#{$fa-css-prefix}-file-archive-o:before { content: fa-content($fa-var-file-archive-o); } -.#{$fa-css-prefix}-file-sound-o:before, -.#{$fa-css-prefix}-file-audio-o:before { content: fa-content($fa-var-file-audio-o); } -.#{$fa-css-prefix}-file-movie-o:before, -.#{$fa-css-prefix}-file-video-o:before { content: fa-content($fa-var-file-video-o); } -.#{$fa-css-prefix}-file-code-o:before { content: fa-content($fa-var-file-code-o); } -.#{$fa-css-prefix}-vine:before { content: fa-content($fa-var-vine); } -.#{$fa-css-prefix}-codepen:before { content: fa-content($fa-var-codepen); } -.#{$fa-css-prefix}-jsfiddle:before { content: fa-content($fa-var-jsfiddle); } -.#{$fa-css-prefix}-life-bouy:before, -.#{$fa-css-prefix}-life-buoy:before, -.#{$fa-css-prefix}-life-saver:before, -.#{$fa-css-prefix}-support:before, -.#{$fa-css-prefix}-life-ring:before { content: fa-content($fa-var-life-ring); } -.#{$fa-css-prefix}-circle-o-notch:before { content: fa-content($fa-var-circle-o-notch); } -.#{$fa-css-prefix}-ra:before, -.#{$fa-css-prefix}-resistance:before, -.#{$fa-css-prefix}-rebel:before { content: fa-content($fa-var-rebel); } -.#{$fa-css-prefix}-ge:before, -.#{$fa-css-prefix}-empire:before { content: fa-content($fa-var-empire); } -.#{$fa-css-prefix}-git-square:before { content: fa-content($fa-var-git-square); } -.#{$fa-css-prefix}-git:before { content: fa-content($fa-var-git); } -.#{$fa-css-prefix}-y-combinator-square:before, -.#{$fa-css-prefix}-yc-square:before, -.#{$fa-css-prefix}-hacker-news:before { content: fa-content($fa-var-hacker-news); } -.#{$fa-css-prefix}-tencent-weibo:before { content: fa-content($fa-var-tencent-weibo); } -.#{$fa-css-prefix}-qq:before { content: fa-content($fa-var-qq); } -.#{$fa-css-prefix}-wechat:before, -.#{$fa-css-prefix}-weixin:before { content: fa-content($fa-var-weixin); } -.#{$fa-css-prefix}-send:before, -.#{$fa-css-prefix}-paper-plane:before { content: fa-content($fa-var-paper-plane); } -.#{$fa-css-prefix}-send-o:before, -.#{$fa-css-prefix}-paper-plane-o:before { content: fa-content($fa-var-paper-plane-o); } -.#{$fa-css-prefix}-history:before { content: fa-content($fa-var-history); } -.#{$fa-css-prefix}-circle-thin:before { content: fa-content($fa-var-circle-thin); } -.#{$fa-css-prefix}-heading:before, -.#{$fa-css-prefix}-header:before { content: fa-content($fa-var-header); } 
-.#{$fa-css-prefix}-paragraph:before { content: fa-content($fa-var-paragraph); } -.#{$fa-css-prefix}-sliders:before { content: fa-content($fa-var-sliders); } -.#{$fa-css-prefix}-share-alt:before { content: fa-content($fa-var-share-alt); } -.#{$fa-css-prefix}-share-alt-square:before { content: fa-content($fa-var-share-alt-square); } -.#{$fa-css-prefix}-bomb:before { content: fa-content($fa-var-bomb); } -.#{$fa-css-prefix}-soccer-ball-o:before, -.#{$fa-css-prefix}-futbol-o:before { content: fa-content($fa-var-futbol-o); } -.#{$fa-css-prefix}-tty:before { content: fa-content($fa-var-tty); } -.#{$fa-css-prefix}-binoculars:before { content: fa-content($fa-var-binoculars); } -.#{$fa-css-prefix}-plug:before { content: fa-content($fa-var-plug); } -.#{$fa-css-prefix}-slideshare:before { content: fa-content($fa-var-slideshare); } -.#{$fa-css-prefix}-twitch:before { content: fa-content($fa-var-twitch); } -.#{$fa-css-prefix}-yelp:before { content: fa-content($fa-var-yelp); } -.#{$fa-css-prefix}-newspaper-o:before { content: fa-content($fa-var-newspaper-o); } -.#{$fa-css-prefix}-wifi:before { content: fa-content($fa-var-wifi); } -.#{$fa-css-prefix}-calculator:before { content: fa-content($fa-var-calculator); } -.#{$fa-css-prefix}-paypal:before { content: fa-content($fa-var-paypal); } -.#{$fa-css-prefix}-google-wallet:before { content: fa-content($fa-var-google-wallet); } -.#{$fa-css-prefix}-cc-visa:before { content: fa-content($fa-var-cc-visa); } -.#{$fa-css-prefix}-cc-mastercard:before { content: fa-content($fa-var-cc-mastercard); } -.#{$fa-css-prefix}-cc-discover:before { content: fa-content($fa-var-cc-discover); } -.#{$fa-css-prefix}-cc-amex:before { content: fa-content($fa-var-cc-amex); } -.#{$fa-css-prefix}-cc-paypal:before { content: fa-content($fa-var-cc-paypal); } -.#{$fa-css-prefix}-cc-stripe:before { content: fa-content($fa-var-cc-stripe); } -.#{$fa-css-prefix}-bell-slash:before { content: fa-content($fa-var-bell-slash); } -.#{$fa-css-prefix}-bell-slash-o:before { content: fa-content($fa-var-bell-slash-o); } -.#{$fa-css-prefix}-trash:before { content: fa-content($fa-var-trash); } -.#{$fa-css-prefix}-copyright:before { content: fa-content($fa-var-copyright); } -.#{$fa-css-prefix}-at:before { content: fa-content($fa-var-at); } -.#{$fa-css-prefix}-eyedropper:before { content: fa-content($fa-var-eyedropper); } -.#{$fa-css-prefix}-paint-brush:before { content: fa-content($fa-var-paint-brush); } -.#{$fa-css-prefix}-birthday-cake:before { content: fa-content($fa-var-birthday-cake); } -.#{$fa-css-prefix}-area-chart:before { content: fa-content($fa-var-area-chart); } -.#{$fa-css-prefix}-pie-chart:before { content: fa-content($fa-var-pie-chart); } -.#{$fa-css-prefix}-line-chart:before { content: fa-content($fa-var-line-chart); } -.#{$fa-css-prefix}-lastfm:before { content: fa-content($fa-var-lastfm); } -.#{$fa-css-prefix}-lastfm-square:before { content: fa-content($fa-var-lastfm-square); } -.#{$fa-css-prefix}-toggle-off:before { content: fa-content($fa-var-toggle-off); } -.#{$fa-css-prefix}-toggle-on:before { content: fa-content($fa-var-toggle-on); } -.#{$fa-css-prefix}-bicycle:before { content: fa-content($fa-var-bicycle); } -.#{$fa-css-prefix}-bus:before { content: fa-content($fa-var-bus); } -.#{$fa-css-prefix}-ioxhost:before { content: fa-content($fa-var-ioxhost); } -.#{$fa-css-prefix}-angellist:before { content: fa-content($fa-var-angellist); } -.#{$fa-css-prefix}-closed-captioning:before, -.#{$fa-css-prefix}-cc:before { content: fa-content($fa-var-cc); } -.#{$fa-css-prefix}-shekel:before, 
-.#{$fa-css-prefix}-sheqel:before, -.#{$fa-css-prefix}-ils:before { content: fa-content($fa-var-ils); } -.#{$fa-css-prefix}-meanpath:before { content: fa-content($fa-var-meanpath); } -.#{$fa-css-prefix}-buysellads:before { content: fa-content($fa-var-buysellads); } -.#{$fa-css-prefix}-connectdevelop:before { content: fa-content($fa-var-connectdevelop); } -.#{$fa-css-prefix}-dashcube:before { content: fa-content($fa-var-dashcube); } -.#{$fa-css-prefix}-forumbee:before { content: fa-content($fa-var-forumbee); } -.#{$fa-css-prefix}-leanpub:before { content: fa-content($fa-var-leanpub); } -.#{$fa-css-prefix}-sellsy:before { content: fa-content($fa-var-sellsy); } -.#{$fa-css-prefix}-shirtsinbulk:before { content: fa-content($fa-var-shirtsinbulk); } -.#{$fa-css-prefix}-simplybuilt:before { content: fa-content($fa-var-simplybuilt); } -.#{$fa-css-prefix}-skyatlas:before { content: fa-content($fa-var-skyatlas); } -.#{$fa-css-prefix}-cart-plus:before { content: fa-content($fa-var-cart-plus); } -.#{$fa-css-prefix}-cart-arrow-down:before { content: fa-content($fa-var-cart-arrow-down); } -.#{$fa-css-prefix}-gem:before, -.#{$fa-css-prefix}-diamond:before { content: fa-content($fa-var-diamond); } -.#{$fa-css-prefix}-ship:before { content: fa-content($fa-var-ship); } -.#{$fa-css-prefix}-user-secret:before { content: fa-content($fa-var-user-secret); } -.#{$fa-css-prefix}-motorcycle:before { content: fa-content($fa-var-motorcycle); } -.#{$fa-css-prefix}-street-view:before { content: fa-content($fa-var-street-view); } -.#{$fa-css-prefix}-heartbeat:before { content: fa-content($fa-var-heartbeat); } -.#{$fa-css-prefix}-venus:before { content: fa-content($fa-var-venus); } -.#{$fa-css-prefix}-mars:before { content: fa-content($fa-var-mars); } -.#{$fa-css-prefix}-mercury:before { content: fa-content($fa-var-mercury); } -.#{$fa-css-prefix}-intersex:before, -.#{$fa-css-prefix}-transgender:before { content: fa-content($fa-var-transgender); } -.#{$fa-css-prefix}-transgender-alt:before { content: fa-content($fa-var-transgender-alt); } -.#{$fa-css-prefix}-venus-double:before { content: fa-content($fa-var-venus-double); } -.#{$fa-css-prefix}-mars-double:before { content: fa-content($fa-var-mars-double); } -.#{$fa-css-prefix}-venus-mars:before { content: fa-content($fa-var-venus-mars); } -.#{$fa-css-prefix}-mars-stroke:before { content: fa-content($fa-var-mars-stroke); } -.#{$fa-css-prefix}-mars-stroke-v:before { content: fa-content($fa-var-mars-stroke-v); } -.#{$fa-css-prefix}-mars-stroke-h:before { content: fa-content($fa-var-mars-stroke-h); } -.#{$fa-css-prefix}-neuter:before { content: fa-content($fa-var-neuter); } -.#{$fa-css-prefix}-genderless:before { content: fa-content($fa-var-genderless); } -.#{$fa-css-prefix}-facebook-official:before { content: fa-content($fa-var-facebook-official); } -.#{$fa-css-prefix}-pinterest-p:before { content: fa-content($fa-var-pinterest-p); } -.#{$fa-css-prefix}-whatsapp:before { content: fa-content($fa-var-whatsapp); } -.#{$fa-css-prefix}-server:before { content: fa-content($fa-var-server); } -.#{$fa-css-prefix}-user-plus:before { content: fa-content($fa-var-user-plus); } -.#{$fa-css-prefix}-user-times:before { content: fa-content($fa-var-user-times); } -.#{$fa-css-prefix}-hotel:before, -.#{$fa-css-prefix}-bed:before { content: fa-content($fa-var-bed); } -.#{$fa-css-prefix}-viacoin:before { content: fa-content($fa-var-viacoin); } -.#{$fa-css-prefix}-train:before { content: fa-content($fa-var-train); } -.#{$fa-css-prefix}-subway:before { content: fa-content($fa-var-subway); } 
-.#{$fa-css-prefix}-medium:before { content: fa-content($fa-var-medium); } -.#{$fa-css-prefix}-medium-square:before { content: fa-content($fa-var-medium-square); } -.#{$fa-css-prefix}-yc:before, -.#{$fa-css-prefix}-y-combinator:before { content: fa-content($fa-var-y-combinator); } -.#{$fa-css-prefix}-optin-monster:before { content: fa-content($fa-var-optin-monster); } -.#{$fa-css-prefix}-opencart:before { content: fa-content($fa-var-opencart); } -.#{$fa-css-prefix}-expeditedssl:before { content: fa-content($fa-var-expeditedssl); } -.#{$fa-css-prefix}-battery-4:before, -.#{$fa-css-prefix}-battery:before, -.#{$fa-css-prefix}-battery-full:before { content: fa-content($fa-var-battery-full); } -.#{$fa-css-prefix}-battery-3:before, -.#{$fa-css-prefix}-battery-three-quarters:before { content: fa-content($fa-var-battery-three-quarters); } -.#{$fa-css-prefix}-battery-2:before, -.#{$fa-css-prefix}-battery-half:before { content: fa-content($fa-var-battery-half); } -.#{$fa-css-prefix}-battery-1:before, -.#{$fa-css-prefix}-battery-quarter:before { content: fa-content($fa-var-battery-quarter); } -.#{$fa-css-prefix}-battery-0:before, -.#{$fa-css-prefix}-battery-empty:before { content: fa-content($fa-var-battery-empty); } -.#{$fa-css-prefix}-mouse-pointer:before { content: fa-content($fa-var-mouse-pointer); } -.#{$fa-css-prefix}-i-cursor:before { content: fa-content($fa-var-i-cursor); } -.#{$fa-css-prefix}-object-group:before { content: fa-content($fa-var-object-group); } -.#{$fa-css-prefix}-object-ungroup:before { content: fa-content($fa-var-object-ungroup); } -.#{$fa-css-prefix}-sticky-note:before { content: fa-content($fa-var-sticky-note); } -.#{$fa-css-prefix}-sticky-note-o:before { content: fa-content($fa-var-sticky-note-o); } -.#{$fa-css-prefix}-cc-jcb:before { content: fa-content($fa-var-cc-jcb); } -.#{$fa-css-prefix}-cc-diners-club:before { content: fa-content($fa-var-cc-diners-club); } -.#{$fa-css-prefix}-clone:before { content: fa-content($fa-var-clone); } -.#{$fa-css-prefix}-balance-scale:before { content: fa-content($fa-var-balance-scale); } -.#{$fa-css-prefix}-hourglass-o:before { content: fa-content($fa-var-hourglass-o); } -.#{$fa-css-prefix}-hourglass-1:before, -.#{$fa-css-prefix}-hourglass-start:before { content: fa-content($fa-var-hourglass-start); } -.#{$fa-css-prefix}-hourglass-2:before, -.#{$fa-css-prefix}-hourglass-half:before { content: fa-content($fa-var-hourglass-half); } -.#{$fa-css-prefix}-hourglass-3:before, -.#{$fa-css-prefix}-hourglass-end:before { content: fa-content($fa-var-hourglass-end); } -.#{$fa-css-prefix}-hourglass:before { content: fa-content($fa-var-hourglass); } -.#{$fa-css-prefix}-hand-grab-o:before, -.#{$fa-css-prefix}-hand-rock-o:before { content: fa-content($fa-var-hand-rock-o); } -.#{$fa-css-prefix}-hand-stop-o:before, -.#{$fa-css-prefix}-hand-paper-o:before { content: fa-content($fa-var-hand-paper-o); } -.#{$fa-css-prefix}-hand-scissors-o:before { content: fa-content($fa-var-hand-scissors-o); } -.#{$fa-css-prefix}-hand-lizard-o:before { content: fa-content($fa-var-hand-lizard-o); } -.#{$fa-css-prefix}-hand-spock-o:before { content: fa-content($fa-var-hand-spock-o); } -.#{$fa-css-prefix}-hand-pointer-o:before { content: fa-content($fa-var-hand-pointer-o); } -.#{$fa-css-prefix}-hand-peace-o:before { content: fa-content($fa-var-hand-peace-o); } -.#{$fa-css-prefix}-trademark:before { content: fa-content($fa-var-trademark); } -.#{$fa-css-prefix}-registered:before { content: fa-content($fa-var-registered); } -.#{$fa-css-prefix}-creative-commons:before { content: 
fa-content($fa-var-creative-commons); } -.#{$fa-css-prefix}-gg:before { content: fa-content($fa-var-gg); } -.#{$fa-css-prefix}-gg-circle:before { content: fa-content($fa-var-gg-circle); } -.#{$fa-css-prefix}-tripadvisor:before { content: fa-content($fa-var-tripadvisor); } -.#{$fa-css-prefix}-odnoklassniki:before { content: fa-content($fa-var-odnoklassniki); } -.#{$fa-css-prefix}-odnoklassniki-square:before { content: fa-content($fa-var-odnoklassniki-square); } -.#{$fa-css-prefix}-get-pocket:before { content: fa-content($fa-var-get-pocket); } -.#{$fa-css-prefix}-wikipedia-w:before { content: fa-content($fa-var-wikipedia-w); } -.#{$fa-css-prefix}-safari:before { content: fa-content($fa-var-safari); } -.#{$fa-css-prefix}-chrome:before { content: fa-content($fa-var-chrome); } -.#{$fa-css-prefix}-firefox:before { content: fa-content($fa-var-firefox); } -.#{$fa-css-prefix}-opera:before { content: fa-content($fa-var-opera); } -.#{$fa-css-prefix}-internet-explorer:before { content: fa-content($fa-var-internet-explorer); } -.#{$fa-css-prefix}-tv:before, -.#{$fa-css-prefix}-television:before { content: fa-content($fa-var-television); } -.#{$fa-css-prefix}-contao:before { content: fa-content($fa-var-contao); } -.#{$fa-css-prefix}-500px:before { content: fa-content($fa-var-500px); } -.#{$fa-css-prefix}-amazon:before { content: fa-content($fa-var-amazon); } -.#{$fa-css-prefix}-calendar-plus-o:before { content: fa-content($fa-var-calendar-plus-o); } -.#{$fa-css-prefix}-calendar-minus-o:before { content: fa-content($fa-var-calendar-minus-o); } -.#{$fa-css-prefix}-calendar-times-o:before { content: fa-content($fa-var-calendar-times-o); } -.#{$fa-css-prefix}-calendar-check-o:before { content: fa-content($fa-var-calendar-check-o); } -.#{$fa-css-prefix}-industry:before { content: fa-content($fa-var-industry); } -.#{$fa-css-prefix}-map-pin:before { content: fa-content($fa-var-map-pin); } -.#{$fa-css-prefix}-map-signs:before { content: fa-content($fa-var-map-signs); } -.#{$fa-css-prefix}-map-o:before { content: fa-content($fa-var-map-o); } -.#{$fa-css-prefix}-map:before { content: fa-content($fa-var-map); } -.#{$fa-css-prefix}-commenting:before { content: fa-content($fa-var-commenting); } -.#{$fa-css-prefix}-commenting-o:before { content: fa-content($fa-var-commenting-o); } -.#{$fa-css-prefix}-houzz:before { content: fa-content($fa-var-houzz); } -.#{$fa-css-prefix}-vimeo-v:before, -.#{$fa-css-prefix}-vimeo:before { content: fa-content($fa-var-vimeo); } -.#{$fa-css-prefix}-black-tie:before { content: fa-content($fa-var-black-tie); } -.#{$fa-css-prefix}-fonticons:before { content: fa-content($fa-var-fonticons); } -.#{$fa-css-prefix}-reddit-alien:before { content: fa-content($fa-var-reddit-alien); } -.#{$fa-css-prefix}-edge:before { content: fa-content($fa-var-edge); } -.#{$fa-css-prefix}-credit-card-alt:before { content: fa-content($fa-var-credit-card-alt); } -.#{$fa-css-prefix}-codiepie:before { content: fa-content($fa-var-codiepie); } -.#{$fa-css-prefix}-modx:before { content: fa-content($fa-var-modx); } -.#{$fa-css-prefix}-fort-awesome:before { content: fa-content($fa-var-fort-awesome); } -.#{$fa-css-prefix}-usb:before { content: fa-content($fa-var-usb); } -.#{$fa-css-prefix}-product-hunt:before { content: fa-content($fa-var-product-hunt); } -.#{$fa-css-prefix}-mixcloud:before { content: fa-content($fa-var-mixcloud); } -.#{$fa-css-prefix}-scribd:before { content: fa-content($fa-var-scribd); } -.#{$fa-css-prefix}-pause-circle:before { content: fa-content($fa-var-pause-circle); } 
-.#{$fa-css-prefix}-pause-circle-o:before { content: fa-content($fa-var-pause-circle-o); } -.#{$fa-css-prefix}-stop-circle:before { content: fa-content($fa-var-stop-circle); } -.#{$fa-css-prefix}-stop-circle-o:before { content: fa-content($fa-var-stop-circle-o); } -.#{$fa-css-prefix}-shopping-bag:before { content: fa-content($fa-var-shopping-bag); } -.#{$fa-css-prefix}-shopping-basket:before { content: fa-content($fa-var-shopping-basket); } -.#{$fa-css-prefix}-hashtag:before { content: fa-content($fa-var-hashtag); } -.#{$fa-css-prefix}-bluetooth:before { content: fa-content($fa-var-bluetooth); } -.#{$fa-css-prefix}-bluetooth-b:before { content: fa-content($fa-var-bluetooth-b); } -.#{$fa-css-prefix}-percent:before { content: fa-content($fa-var-percent); } -.#{$fa-css-prefix}-gitlab:before { content: fa-content($fa-var-gitlab); } -.#{$fa-css-prefix}-wpbeginner:before { content: fa-content($fa-var-wpbeginner); } -.#{$fa-css-prefix}-wpforms:before { content: fa-content($fa-var-wpforms); } -.#{$fa-css-prefix}-envira:before { content: fa-content($fa-var-envira); } -.#{$fa-css-prefix}-universal-access:before { content: fa-content($fa-var-universal-access); } -.#{$fa-css-prefix}-wheelchair-alt:before { content: fa-content($fa-var-wheelchair-alt); } -.#{$fa-css-prefix}-question-circle-o:before { content: fa-content($fa-var-question-circle-o); } -.#{$fa-css-prefix}-blind:before { content: fa-content($fa-var-blind); } -.#{$fa-css-prefix}-audio-description:before { content: fa-content($fa-var-audio-description); } -.#{$fa-css-prefix}-phone-volume:before, -.#{$fa-css-prefix}-volume-control-phone:before { content: fa-content($fa-var-volume-control-phone); } -.#{$fa-css-prefix}-braille:before { content: fa-content($fa-var-braille); } -.#{$fa-css-prefix}-assistive-listening-systems:before { content: fa-content($fa-var-assistive-listening-systems); } -.#{$fa-css-prefix}-asl-interpreting:before, -.#{$fa-css-prefix}-american-sign-language-interpreting:before { content: fa-content($fa-var-american-sign-language-interpreting); } -.#{$fa-css-prefix}-deafness:before, -.#{$fa-css-prefix}-hard-of-hearing:before, -.#{$fa-css-prefix}-deaf:before { content: fa-content($fa-var-deaf); } -.#{$fa-css-prefix}-glide:before { content: fa-content($fa-var-glide); } -.#{$fa-css-prefix}-glide-g:before { content: fa-content($fa-var-glide-g); } -.#{$fa-css-prefix}-signing:before, -.#{$fa-css-prefix}-sign-language:before { content: fa-content($fa-var-sign-language); } -.#{$fa-css-prefix}-low-vision:before { content: fa-content($fa-var-low-vision); } -.#{$fa-css-prefix}-viadeo:before { content: fa-content($fa-var-viadeo); } -.#{$fa-css-prefix}-viadeo-square:before { content: fa-content($fa-var-viadeo-square); } -.#{$fa-css-prefix}-snapchat:before { content: fa-content($fa-var-snapchat); } -.#{$fa-css-prefix}-snapchat-ghost:before { content: fa-content($fa-var-snapchat-ghost); } -.#{$fa-css-prefix}-snapchat-square:before { content: fa-content($fa-var-snapchat-square); } -.#{$fa-css-prefix}-first-order:before { content: fa-content($fa-var-first-order); } -.#{$fa-css-prefix}-yoast:before { content: fa-content($fa-var-yoast); } -.#{$fa-css-prefix}-themeisle:before { content: fa-content($fa-var-themeisle); } -.#{$fa-css-prefix}-google-plus-circle:before, -.#{$fa-css-prefix}-google-plus-official:before { content: fa-content($fa-var-google-plus-official); } -.#{$fa-css-prefix}-fa:before, -.#{$fa-css-prefix}-font-awesome:before { content: fa-content($fa-var-font-awesome); } -.#{$fa-css-prefix}-handshake-o:before { content: 
fa-content($fa-var-handshake-o); } -.#{$fa-css-prefix}-envelope-open:before { content: fa-content($fa-var-envelope-open); } -.#{$fa-css-prefix}-envelope-open-o:before { content: fa-content($fa-var-envelope-open-o); } -.#{$fa-css-prefix}-linode:before { content: fa-content($fa-var-linode); } -.#{$fa-css-prefix}-address-book:before { content: fa-content($fa-var-address-book); } -.#{$fa-css-prefix}-address-book-o:before { content: fa-content($fa-var-address-book-o); } -.#{$fa-css-prefix}-vcard:before, -.#{$fa-css-prefix}-address-card:before { content: fa-content($fa-var-address-card); } -.#{$fa-css-prefix}-vcard-o:before, -.#{$fa-css-prefix}-address-card-o:before { content: fa-content($fa-var-address-card-o); } -.#{$fa-css-prefix}-user-circle:before { content: fa-content($fa-var-user-circle); } -.#{$fa-css-prefix}-user-circle-o:before { content: fa-content($fa-var-user-circle-o); } -.#{$fa-css-prefix}-user-o:before { content: fa-content($fa-var-user-o); } -.#{$fa-css-prefix}-id-badge:before { content: fa-content($fa-var-id-badge); } -.#{$fa-css-prefix}-drivers-license:before, -.#{$fa-css-prefix}-id-card:before { content: fa-content($fa-var-id-card); } -.#{$fa-css-prefix}-drivers-license-o:before, -.#{$fa-css-prefix}-id-card-o:before { content: fa-content($fa-var-id-card-o); } -.#{$fa-css-prefix}-quora:before { content: fa-content($fa-var-quora); } -.#{$fa-css-prefix}-free-code-camp:before { content: fa-content($fa-var-free-code-camp); } -.#{$fa-css-prefix}-telegram:before { content: fa-content($fa-var-telegram); } -.#{$fa-css-prefix}-thermometer-4:before, -.#{$fa-css-prefix}-thermometer:before, -.#{$fa-css-prefix}-thermometer-full:before { content: fa-content($fa-var-thermometer-full); } -.#{$fa-css-prefix}-thermometer-3:before, -.#{$fa-css-prefix}-thermometer-three-quarters:before { content: fa-content($fa-var-thermometer-three-quarters); } -.#{$fa-css-prefix}-thermometer-2:before, -.#{$fa-css-prefix}-thermometer-half:before { content: fa-content($fa-var-thermometer-half); } -.#{$fa-css-prefix}-thermometer-1:before, -.#{$fa-css-prefix}-thermometer-quarter:before { content: fa-content($fa-var-thermometer-quarter); } -.#{$fa-css-prefix}-thermometer-0:before, -.#{$fa-css-prefix}-thermometer-empty:before { content: fa-content($fa-var-thermometer-empty); } -.#{$fa-css-prefix}-shower:before { content: fa-content($fa-var-shower); } -.#{$fa-css-prefix}-bathtub:before, -.#{$fa-css-prefix}-s15:before, -.#{$fa-css-prefix}-bath:before { content: fa-content($fa-var-bath); } -.#{$fa-css-prefix}-podcast:before { content: fa-content($fa-var-podcast); } -.#{$fa-css-prefix}-window-maximize:before { content: fa-content($fa-var-window-maximize); } -.#{$fa-css-prefix}-window-minimize:before { content: fa-content($fa-var-window-minimize); } -.#{$fa-css-prefix}-window-restore:before { content: fa-content($fa-var-window-restore); } -.#{$fa-css-prefix}-times-rectangle:before, -.#{$fa-css-prefix}-window-close:before { content: fa-content($fa-var-window-close); } -.#{$fa-css-prefix}-times-rectangle-o:before, -.#{$fa-css-prefix}-window-close-o:before { content: fa-content($fa-var-window-close-o); } -.#{$fa-css-prefix}-bandcamp:before { content: fa-content($fa-var-bandcamp); } -.#{$fa-css-prefix}-grav:before { content: fa-content($fa-var-grav); } -.#{$fa-css-prefix}-etsy:before { content: fa-content($fa-var-etsy); } -.#{$fa-css-prefix}-imdb:before { content: fa-content($fa-var-imdb); } -.#{$fa-css-prefix}-ravelry:before { content: fa-content($fa-var-ravelry); } -.#{$fa-css-prefix}-eercast:before { content: 
fa-content($fa-var-eercast); } -.#{$fa-css-prefix}-microchip:before { content: fa-content($fa-var-microchip); } -.#{$fa-css-prefix}-snowflake-o:before { content: fa-content($fa-var-snowflake-o); } -.#{$fa-css-prefix}-superpowers:before { content: fa-content($fa-var-superpowers); } -.#{$fa-css-prefix}-wpexplorer:before { content: fa-content($fa-var-wpexplorer); } -.#{$fa-css-prefix}-meetup:before { content: fa-content($fa-var-meetup); } -.#{$fa-css-prefix}-mastodon:before { content: fa-content($fa-var-mastodon); } -.#{$fa-css-prefix}-mastodon-alt:before { content: fa-content($fa-var-mastodon-alt); } -.#{$fa-css-prefix}-fork-circle:before, -.#{$fa-css-prefix}-fork-awesome:before { content: fa-content($fa-var-fork-awesome); } -.#{$fa-css-prefix}-peertube:before { content: fa-content($fa-var-peertube); } -.#{$fa-css-prefix}-diaspora:before { content: fa-content($fa-var-diaspora); } -.#{$fa-css-prefix}-friendica:before { content: fa-content($fa-var-friendica); } -.#{$fa-css-prefix}-gnu-social:before { content: fa-content($fa-var-gnu-social); } -.#{$fa-css-prefix}-liberapay-square:before { content: fa-content($fa-var-liberapay-square); } -.#{$fa-css-prefix}-liberapay:before { content: fa-content($fa-var-liberapay); } -.#{$fa-css-prefix}-ssb:before, -.#{$fa-css-prefix}-scuttlebutt:before { content: fa-content($fa-var-scuttlebutt); } -.#{$fa-css-prefix}-hubzilla:before { content: fa-content($fa-var-hubzilla); } -.#{$fa-css-prefix}-social-home:before { content: fa-content($fa-var-social-home); } -.#{$fa-css-prefix}-artstation:before { content: fa-content($fa-var-artstation); } -.#{$fa-css-prefix}-discord:before { content: fa-content($fa-var-discord); } -.#{$fa-css-prefix}-discord-alt:before { content: fa-content($fa-var-discord-alt); } -.#{$fa-css-prefix}-patreon:before { content: fa-content($fa-var-patreon); } -.#{$fa-css-prefix}-snowdrift:before { content: fa-content($fa-var-snowdrift); } -.#{$fa-css-prefix}-activitypub:before { content: fa-content($fa-var-activitypub); } -.#{$fa-css-prefix}-ethereum:before { content: fa-content($fa-var-ethereum); } -.#{$fa-css-prefix}-keybase:before { content: fa-content($fa-var-keybase); } -.#{$fa-css-prefix}-shaarli:before { content: fa-content($fa-var-shaarli); } -.#{$fa-css-prefix}-shaarli-o:before { content: fa-content($fa-var-shaarli-o); } -.#{$fa-css-prefix}-cut-key:before, -.#{$fa-css-prefix}-key-modern:before { content: fa-content($fa-var-key-modern); } -.#{$fa-css-prefix}-xmpp:before { content: fa-content($fa-var-xmpp); } -.#{$fa-css-prefix}-archive-org:before { content: fa-content($fa-var-archive-org); } -.#{$fa-css-prefix}-freedombox:before { content: fa-content($fa-var-freedombox); } -.#{$fa-css-prefix}-facebook-messenger:before { content: fa-content($fa-var-facebook-messenger); } -.#{$fa-css-prefix}-debian:before { content: fa-content($fa-var-debian); } -.#{$fa-css-prefix}-mastodon-square:before { content: fa-content($fa-var-mastodon-square); } -.#{$fa-css-prefix}-tipeee:before { content: fa-content($fa-var-tipeee); } -.#{$fa-css-prefix}-react:before { content: fa-content($fa-var-react); } -.#{$fa-css-prefix}-dogmazic:before { content: fa-content($fa-var-dogmazic); } -.#{$fa-css-prefix}-zotero:before { content: fa-content($fa-var-zotero); } -.#{$fa-css-prefix}-nodejs:before { content: fa-content($fa-var-nodejs); } -.#{$fa-css-prefix}-nextcloud:before { content: fa-content($fa-var-nextcloud); } -.#{$fa-css-prefix}-nextcloud-square:before { content: fa-content($fa-var-nextcloud-square); } -.#{$fa-css-prefix}-hackaday:before { content: 
fa-content($fa-var-hackaday); } -.#{$fa-css-prefix}-laravel:before { content: fa-content($fa-var-laravel); } -.#{$fa-css-prefix}-signalapp:before { content: fa-content($fa-var-signalapp); } -.#{$fa-css-prefix}-gnupg:before { content: fa-content($fa-var-gnupg); } -.#{$fa-css-prefix}-php:before { content: fa-content($fa-var-php); } -.#{$fa-css-prefix}-ffmpeg:before { content: fa-content($fa-var-ffmpeg); } -.#{$fa-css-prefix}-joplin:before { content: fa-content($fa-var-joplin); } -.#{$fa-css-prefix}-syncthing:before { content: fa-content($fa-var-syncthing); } -.#{$fa-css-prefix}-inkscape:before { content: fa-content($fa-var-inkscape); } -.#{$fa-css-prefix}-matrix-org:before { content: fa-content($fa-var-matrix-org); } -.#{$fa-css-prefix}-pixelfed:before { content: fa-content($fa-var-pixelfed); } -.#{$fa-css-prefix}-bootstrap:before { content: fa-content($fa-var-bootstrap); } -.#{$fa-css-prefix}-dev-to:before { content: fa-content($fa-var-dev-to); } -.#{$fa-css-prefix}-hashnode:before { content: fa-content($fa-var-hashnode); } -.#{$fa-css-prefix}-jirafeau:before { content: fa-content($fa-var-jirafeau); } -.#{$fa-css-prefix}-emby:before { content: fa-content($fa-var-emby); } -.#{$fa-css-prefix}-wikidata:before { content: fa-content($fa-var-wikidata); } -.#{$fa-css-prefix}-gimp:before { content: fa-content($fa-var-gimp); } -.#{$fa-css-prefix}-c:before { content: fa-content($fa-var-c); } -.#{$fa-css-prefix}-digitalocean:before { content: fa-content($fa-var-digitalocean); } -.#{$fa-css-prefix}-att:before { content: fa-content($fa-var-att); } -.#{$fa-css-prefix}-gitea:before { content: fa-content($fa-var-gitea); } -.#{$fa-css-prefix}-file-epub:before { content: fa-content($fa-var-file-epub); } -.#{$fa-css-prefix}-python:before { content: fa-content($fa-var-python); } -.#{$fa-css-prefix}-archlinux:before { content: fa-content($fa-var-archlinux); } -.#{$fa-css-prefix}-pleroma:before { content: fa-content($fa-var-pleroma); } -.#{$fa-css-prefix}-unsplash:before { content: fa-content($fa-var-unsplash); } -.#{$fa-css-prefix}-hackster:before { content: fa-content($fa-var-hackster); } -.#{$fa-css-prefix}-spell-check:before { content: fa-content($fa-var-spell-check); } -.#{$fa-css-prefix}-moon:before { content: fa-content($fa-var-moon); } -.#{$fa-css-prefix}-sun:before { content: fa-content($fa-var-sun); } -.#{$fa-css-prefix}-f-droid:before { content: fa-content($fa-var-f-droid); } -.#{$fa-css-prefix}-biometric:before { content: fa-content($fa-var-biometric); } -.#{$fa-css-prefix}-wire:before { content: fa-content($fa-var-wire); } -.#{$fa-css-prefix}-tor-onion:before { content: fa-content($fa-var-tor-onion); } -.#{$fa-css-prefix}-volume-mute:before { content: fa-content($fa-var-volume-mute); } -.#{$fa-css-prefix}-bell-ringing:before { content: fa-content($fa-var-bell-ringing); } -.#{$fa-css-prefix}-bell-ringing-o:before { content: fa-content($fa-var-bell-ringing-o); } -.#{$fa-css-prefix}-hal:before { content: fa-content($fa-var-hal); } -.#{$fa-css-prefix}-jupyter:before { content: fa-content($fa-var-jupyter); } -.#{$fa-css-prefix}-julia:before { content: fa-content($fa-var-julia); } -.#{$fa-css-prefix}-classicpress:before { content: fa-content($fa-var-classicpress); } -.#{$fa-css-prefix}-classicpress-circle:before { content: fa-content($fa-var-classicpress-circle); } -.#{$fa-css-prefix}-open-collective:before { content: fa-content($fa-var-open-collective); } -.#{$fa-css-prefix}-orcid:before { content: fa-content($fa-var-orcid); } -.#{$fa-css-prefix}-researchgate:before { content: 
fa-content($fa-var-researchgate); } -.#{$fa-css-prefix}-funkwhale:before { content: fa-content($fa-var-funkwhale); } -.#{$fa-css-prefix}-askfm:before { content: fa-content($fa-var-askfm); } -.#{$fa-css-prefix}-blockstack:before { content: fa-content($fa-var-blockstack); } -.#{$fa-css-prefix}-boardgamegeek:before { content: fa-content($fa-var-boardgamegeek); } -.#{$fa-css-prefix}-bunny:before { content: fa-content($fa-var-bunny); } -.#{$fa-css-prefix}-buymeacoffee:before { content: fa-content($fa-var-buymeacoffee); } -.#{$fa-css-prefix}-cc-by:before { content: fa-content($fa-var-cc-by); } -.#{$fa-css-prefix}-creative-commons-alt:before, -.#{$fa-css-prefix}-cc-cc:before { content: fa-content($fa-var-cc-cc); } -.#{$fa-css-prefix}-cc-nc-eu:before { content: fa-content($fa-var-cc-nc-eu); } -.#{$fa-css-prefix}-cc-nc-jp:before { content: fa-content($fa-var-cc-nc-jp); } -.#{$fa-css-prefix}-cc-nc:before { content: fa-content($fa-var-cc-nc); } -.#{$fa-css-prefix}-cc-nd:before { content: fa-content($fa-var-cc-nd); } -.#{$fa-css-prefix}-cc-pd:before { content: fa-content($fa-var-cc-pd); } -.#{$fa-css-prefix}-cc-remix:before { content: fa-content($fa-var-cc-remix); } -.#{$fa-css-prefix}-cc-sa:before { content: fa-content($fa-var-cc-sa); } -.#{$fa-css-prefix}-cc-share:before { content: fa-content($fa-var-cc-share); } -.#{$fa-css-prefix}-cc-zero:before { content: fa-content($fa-var-cc-zero); } -.#{$fa-css-prefix}-conway-hacker:before, -.#{$fa-css-prefix}-conway-glider:before { content: fa-content($fa-var-conway-glider); } -.#{$fa-css-prefix}-csharp:before { content: fa-content($fa-var-csharp); } -.#{$fa-css-prefix}-email-bulk:before { content: fa-content($fa-var-email-bulk); } -.#{$fa-css-prefix}-email-bulk-o:before { content: fa-content($fa-var-email-bulk-o); } -.#{$fa-css-prefix}-gnu:before { content: fa-content($fa-var-gnu); } -.#{$fa-css-prefix}-google-play:before { content: fa-content($fa-var-google-play); } -.#{$fa-css-prefix}-heroku:before { content: fa-content($fa-var-heroku); } -.#{$fa-css-prefix}-hassio:before, -.#{$fa-css-prefix}-home-assistant:before { content: fa-content($fa-var-home-assistant); } -.#{$fa-css-prefix}-java:before { content: fa-content($fa-var-java); } -.#{$fa-css-prefix}-mariadb:before { content: fa-content($fa-var-mariadb); } -.#{$fa-css-prefix}-markdown:before { content: fa-content($fa-var-markdown); } -.#{$fa-css-prefix}-mysql:before { content: fa-content($fa-var-mysql); } -.#{$fa-css-prefix}-nordcast:before { content: fa-content($fa-var-nordcast); } -.#{$fa-css-prefix}-plume:before { content: fa-content($fa-var-plume); } -.#{$fa-css-prefix}-postgresql:before { content: fa-content($fa-var-postgresql); } -.#{$fa-css-prefix}-sass-alt:before { content: fa-content($fa-var-sass-alt); } -.#{$fa-css-prefix}-sass:before { content: fa-content($fa-var-sass); } -.#{$fa-css-prefix}-skate:before { content: fa-content($fa-var-skate); } -.#{$fa-css-prefix}-sketchfab:before { content: fa-content($fa-var-sketchfab); } -.#{$fa-css-prefix}-tex:before { content: fa-content($fa-var-tex); } -.#{$fa-css-prefix}-textpattern:before { content: fa-content($fa-var-textpattern); } -.#{$fa-css-prefix}-unity:before { content: fa-content($fa-var-unity); } diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_larger.scss b/themes/hugo-coder/assets/scss/fork-awesome/_larger.scss deleted file mode 100644 index 41e9a818..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_larger.scss +++ /dev/null @@ -1,13 +0,0 @@ -// Icon Sizes -// ------------------------- - -/* makes the font 33% larger relative 
to the icon container */ -.#{$fa-css-prefix}-lg { - font-size: (4em / 3); - line-height: (3em / 4); - vertical-align: -15%; -} -.#{$fa-css-prefix}-2x { font-size: 2em; } -.#{$fa-css-prefix}-3x { font-size: 3em; } -.#{$fa-css-prefix}-4x { font-size: 4em; } -.#{$fa-css-prefix}-5x { font-size: 5em; } diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_list.scss b/themes/hugo-coder/assets/scss/fork-awesome/_list.scss deleted file mode 100644 index 7d1e4d54..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_list.scss +++ /dev/null @@ -1,19 +0,0 @@ -// List Icons -// ------------------------- - -.#{$fa-css-prefix}-ul { - padding-left: 0; - margin-left: $fa-li-width; - list-style-type: none; - > li { position: relative; } -} -.#{$fa-css-prefix}-li { - position: absolute; - left: -$fa-li-width; - width: $fa-li-width; - top: (2em / 14); - text-align: center; - &.#{$fa-css-prefix}-lg { - left: -$fa-li-width + (4em / 14); - } -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_mixins.scss b/themes/hugo-coder/assets/scss/fork-awesome/_mixins.scss deleted file mode 100644 index 6fdb1284..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_mixins.scss +++ /dev/null @@ -1,60 +0,0 @@ -// Mixins -// -------------------------- - -@mixin fa-icon() { - display: inline-block; - font: normal normal normal #{$fa-font-size-base}/#{$fa-line-height-base} #{$fa-font-family}; // shortening font declaration - font-size: inherit; // can't have font-size inherit on line above, so need to override - text-rendering: auto; // optimizelegibility throws things off #1094 - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; - -} - -@mixin fa-icon-rotate($degrees, $rotation) { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation})"; - -webkit-transform: rotate($degrees); - -ms-transform: rotate($degrees); - transform: rotate($degrees); -} - -@mixin fa-icon-flip($horiz, $vert, $rotation) { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=#{$rotation}, mirror=1)"; - -webkit-transform: scale($horiz, $vert); - -ms-transform: scale($horiz, $vert); - transform: scale($horiz, $vert); -} - - -// Only display content to screen readers. A la Bootstrap 4. -// -// See: http://a11yproject.com/posts/how-to-hide-content/ - -@mixin sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0,0,0,0); - border: 0; -} - -// Use in conjunction with .sr-only to only display content when it's focused. 
-// -// Useful for "Skip to main content" links; see http://www.w3.org/TR/2013/NOTE-WCAG20-TECHS-20130905/G1 -// -// Credit: HTML5 Boilerplate - -@mixin sr-only-focusable { - &:active, - &:focus { - position: static; - width: auto; - height: auto; - margin: 0; - overflow: visible; - clip: auto; - } -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_path.scss b/themes/hugo-coder/assets/scss/fork-awesome/_path.scss deleted file mode 100644 index 1566182d..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_path.scss +++ /dev/null @@ -1,16 +0,0 @@ -/* FONT PATH - * -------------------------- */ - -@font-face { - font-family: '#{$fa-font-family}'; - src: url('#{$fa-font-path}/forkawesome-webfont.eot?v=#{$fa-version}'); - src: url('#{$fa-font-path}/forkawesome-webfont.eot?#iefix&v=#{$fa-version}') format('embedded-opentype'), - url('#{$fa-font-path}/forkawesome-webfont.woff2?v=#{$fa-version}') format('woff2'), - url('#{$fa-font-path}/forkawesome-webfont.woff?v=#{$fa-version}') format('woff'), - url('#{$fa-font-path}/forkawesome-webfont.ttf?v=#{$fa-version}') format('truetype'), - url('#{$fa-font-path}/forkawesome-webfont.svg?v=#{$fa-version}#forkawesomeregular') format('svg'); -// src: url('#{$fa-font-path}/ForkAwesome.otf') format('opentype'); // used when developing fonts - font-weight: normal; - font-style: normal; - font-display: block; -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_rotated-flipped.scss b/themes/hugo-coder/assets/scss/fork-awesome/_rotated-flipped.scss deleted file mode 100644 index a3558fd0..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_rotated-flipped.scss +++ /dev/null @@ -1,20 +0,0 @@ -// Rotated & Flipped Icons -// ------------------------- - -.#{$fa-css-prefix}-rotate-90 { @include fa-icon-rotate(90deg, 1); } -.#{$fa-css-prefix}-rotate-180 { @include fa-icon-rotate(180deg, 2); } -.#{$fa-css-prefix}-rotate-270 { @include fa-icon-rotate(270deg, 3); } - -.#{$fa-css-prefix}-flip-horizontal { @include fa-icon-flip(-1, 1, 0); } -.#{$fa-css-prefix}-flip-vertical { @include fa-icon-flip(1, -1, 2); } - -// Hook for IE8-9 -// ------------------------- - -:root .#{$fa-css-prefix}-rotate-90, -:root .#{$fa-css-prefix}-rotate-180, -:root .#{$fa-css-prefix}-rotate-270, -:root .#{$fa-css-prefix}-flip-horizontal, -:root .#{$fa-css-prefix}-flip-vertical { - filter: none; -} diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_screen-reader.scss b/themes/hugo-coder/assets/scss/fork-awesome/_screen-reader.scss deleted file mode 100644 index 637426f0..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_screen-reader.scss +++ /dev/null @@ -1,5 +0,0 @@ -// Screen Readers -// ------------------------- - -.sr-only { @include sr-only(); } -.sr-only-focusable { @include sr-only-focusable(); } diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_stacked.scss b/themes/hugo-coder/assets/scss/fork-awesome/_stacked.scss deleted file mode 100644 index aef74036..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_stacked.scss +++ /dev/null @@ -1,20 +0,0 @@ -// Stacked Icons -// ------------------------- - -.#{$fa-css-prefix}-stack { - position: relative; - display: inline-block; - width: 2em; - height: 2em; - line-height: 2em; - vertical-align: middle; -} -.#{$fa-css-prefix}-stack-1x, .#{$fa-css-prefix}-stack-2x { - position: absolute; - left: 0; - width: 100%; - text-align: center; -} -.#{$fa-css-prefix}-stack-1x { line-height: inherit; } -.#{$fa-css-prefix}-stack-2x { font-size: 2em; } -.#{$fa-css-prefix}-inverse { color: $fa-inverse; } 
diff --git a/themes/hugo-coder/assets/scss/fork-awesome/_variables.scss b/themes/hugo-coder/assets/scss/fork-awesome/_variables.scss deleted file mode 100644 index 3a8abf3d..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/_variables.scss +++ /dev/null @@ -1,945 +0,0 @@ -// Variables -// -------------------------- - -$fa-font-path: "../fonts" !default; -$fa-font-size-base: 14px !default; -$fa-line-height-base: 1 !default; -$fa-css-prefix: "fa" !default; -$fa-font-family: "ForkAwesome" !default; -$fa-version: "1.2.0" !default; -$fa-border-color: #eee !default; -$fa-inverse: #fff !default; -$fa-li-width: (30em / 14) !default; - -$fa-var-500px: \f26e; -$fa-var-activitypub: \f2f2; -$fa-var-address-book: \f2b9; -$fa-var-address-book-o: \f2ba; -$fa-var-address-card: \f2bb; -$fa-var-address-card-o: \f2bc; -$fa-var-adjust: \f042; -$fa-var-adn: \f170; -$fa-var-align-center: \f037; -$fa-var-align-justify: \f039; -$fa-var-align-left: \f036; -$fa-var-align-right: \f038; -$fa-var-amazon: \f270; -$fa-var-ambulance: \f0f9; -$fa-var-american-sign-language-interpreting: \f2a3; -$fa-var-anchor: \f13d; -$fa-var-android: \f17b; -$fa-var-angellist: \f209; -$fa-var-angle-double-down: \f103; -$fa-var-angle-double-left: \f100; -$fa-var-angle-double-right: \f101; -$fa-var-angle-double-up: \f102; -$fa-var-angle-down: \f107; -$fa-var-angle-left: \f104; -$fa-var-angle-right: \f105; -$fa-var-angle-up: \f106; -$fa-var-apple: \f179; -$fa-var-archive: \f187; -$fa-var-archive-org: \f2fc; -$fa-var-archlinux: \f323; -$fa-var-area-chart: \f1fe; -$fa-var-arrow-circle-down: \f0ab; -$fa-var-arrow-circle-left: \f0a8; -$fa-var-arrow-circle-o-down: \f01a; -$fa-var-arrow-circle-o-left: \f190; -$fa-var-arrow-circle-o-right: \f18e; -$fa-var-arrow-circle-o-up: \f01b; -$fa-var-arrow-circle-right: \f0a9; -$fa-var-arrow-circle-up: \f0aa; -$fa-var-arrow-down: \f063; -$fa-var-arrow-left: \f060; -$fa-var-arrow-right: \f061; -$fa-var-arrow-up: \f062; -$fa-var-arrows: \f047; -$fa-var-arrows-alt: \f0b2; -$fa-var-arrows-h: \f07e; -$fa-var-arrows-v: \f07d; -$fa-var-artstation: \f2ed; -$fa-var-askfm: \f33a; -$fa-var-asl-interpreting: \f2a3; -$fa-var-assistive-listening-systems: \f2a2; -$fa-var-asterisk: \f069; -$fa-var-at: \f1fa; -$fa-var-att: \f31e; -$fa-var-audio-description: \f29e; -$fa-var-automobile: \f1b9; -$fa-var-backward: \f04a; -$fa-var-balance-scale: \f24e; -$fa-var-ban: \f05e; -$fa-var-bandcamp: \f2d5; -$fa-var-bank: \f19c; -$fa-var-bar-chart: \f080; -$fa-var-bar-chart-o: \f080; -$fa-var-barcode: \f02a; -$fa-var-bars: \f0c9; -$fa-var-bath: \f2cd; -$fa-var-bathtub: \f2cd; -$fa-var-battery: \f240; -$fa-var-battery-0: \f244; -$fa-var-battery-1: \f243; -$fa-var-battery-2: \f242; -$fa-var-battery-3: \f241; -$fa-var-battery-4: \f240; -$fa-var-battery-empty: \f244; -$fa-var-battery-full: \f240; -$fa-var-battery-half: \f242; -$fa-var-battery-quarter: \f243; -$fa-var-battery-three-quarters: \f241; -$fa-var-bed: \f236; -$fa-var-beer: \f0fc; -$fa-var-behance: \f1b4; -$fa-var-behance-square: \f1b5; -$fa-var-bell: \f0a2; -$fa-var-bell-o: \f0f3; -$fa-var-bell-ringing: \f32d; -$fa-var-bell-ringing-o: \f330; -$fa-var-bell-slash: \f1f6; -$fa-var-bell-slash-o: \f1f7; -$fa-var-bicycle: \f206; -$fa-var-binoculars: \f1e5; -$fa-var-biometric: \f32b; -$fa-var-birthday-cake: \f1fd; -$fa-var-bitbucket: \f171; -$fa-var-bitbucket-square: \f172; -$fa-var-bitcoin: \f15a; -$fa-var-black-tie: \f27e; -$fa-var-blind: \f29d; -$fa-var-blockstack: \f33b; -$fa-var-bluetooth: \f293; -$fa-var-bluetooth-b: \f294; -$fa-var-boardgamegeek: \f33c; -$fa-var-bold: \f032; 
-$fa-var-bolt: \f0e7; -$fa-var-bomb: \f1e2; -$fa-var-book: \f02d; -$fa-var-bookmark: \f02e; -$fa-var-bookmark-o: \f097; -$fa-var-bootstrap: \f315; -$fa-var-braille: \f2a1; -$fa-var-briefcase: \f0b1; -$fa-var-btc: \f15a; -$fa-var-bug: \f188; -$fa-var-building: \f1ad; -$fa-var-building-o: \f0f7; -$fa-var-bullhorn: \f0a1; -$fa-var-bullseye: \f140; -$fa-var-bunny: \f35f; -$fa-var-bus: \f207; -$fa-var-buymeacoffee: \f33d; -$fa-var-buysellads: \f20d; -$fa-var-c: \f31c; -$fa-var-cab: \f1ba; -$fa-var-calculator: \f1ec; -$fa-var-calendar: \f073; -$fa-var-calendar-check-o: \f274; -$fa-var-calendar-minus-o: \f272; -$fa-var-calendar-o: \f133; -$fa-var-calendar-plus-o: \f271; -$fa-var-calendar-times-o: \f273; -$fa-var-camera: \f030; -$fa-var-camera-retro: \f083; -$fa-var-car: \f1b9; -$fa-var-caret-down: \f0d7; -$fa-var-caret-left: \f0d9; -$fa-var-caret-right: \f0da; -$fa-var-caret-square-o-down: \f150; -$fa-var-caret-square-o-left: \f191; -$fa-var-caret-square-o-right: \f152; -$fa-var-caret-square-o-up: \f151; -$fa-var-caret-up: \f0d8; -$fa-var-cart-arrow-down: \f218; -$fa-var-cart-plus: \f217; -$fa-var-cc: \f20a; -$fa-var-cc-amex: \f1f3; -$fa-var-cc-by: \f33e; -$fa-var-cc-cc: \f33f; -$fa-var-cc-diners-club: \f24c; -$fa-var-cc-discover: \f1f2; -$fa-var-cc-jcb: \f24b; -$fa-var-cc-mastercard: \f1f1; -$fa-var-cc-nc: \f340; -$fa-var-cc-nc-eu: \f341; -$fa-var-cc-nc-jp: \f342; -$fa-var-cc-nd: \f343; -$fa-var-cc-paypal: \f1f4; -$fa-var-cc-pd: \f344; -$fa-var-cc-remix: \f345; -$fa-var-cc-sa: \f346; -$fa-var-cc-share: \f347; -$fa-var-cc-stripe: \f1f5; -$fa-var-cc-visa: \f1f0; -$fa-var-cc-zero: \f348; -$fa-var-certificate: \f0a3; -$fa-var-chain: \f0c1; -$fa-var-chain-broken: \f127; -$fa-var-check: \f00c; -$fa-var-check-circle: \f058; -$fa-var-check-circle-o: \f05d; -$fa-var-check-square: \f14a; -$fa-var-check-square-o: \f046; -$fa-var-chevron-circle-down: \f13a; -$fa-var-chevron-circle-left: \f137; -$fa-var-chevron-circle-right: \f138; -$fa-var-chevron-circle-up: \f139; -$fa-var-chevron-down: \f078; -$fa-var-chevron-left: \f053; -$fa-var-chevron-right: \f054; -$fa-var-chevron-up: \f077; -$fa-var-child: \f1ae; -$fa-var-chrome: \f268; -$fa-var-circle: \f111; -$fa-var-circle-o: \f10c; -$fa-var-circle-o-notch: \f1ce; -$fa-var-circle-thin: \f1db; -$fa-var-classicpress: \f331; -$fa-var-classicpress-circle: \f332; -$fa-var-clipboard: \f0ea; -$fa-var-clock-o: \f017; -$fa-var-clone: \f24d; -$fa-var-close: \f00d; -$fa-var-closed-captioning: \f20a; -$fa-var-cloud: \f0c2; -$fa-var-cloud-download: \f0ed; -$fa-var-cloud-upload: \f0ee; -$fa-var-cny: \f157; -$fa-var-code: \f121; -$fa-var-code-fork: \f126; -$fa-var-codepen: \f1cb; -$fa-var-codiepie: \f284; -$fa-var-coffee: \f0f4; -$fa-var-cog: \f013; -$fa-var-cogs: \f085; -$fa-var-columns: \f0db; -$fa-var-comment: \f075; -$fa-var-comment-o: \f0e5; -$fa-var-commenting: \f27a; -$fa-var-commenting-o: \f27b; -$fa-var-comments: \f086; -$fa-var-comments-o: \f0e6; -$fa-var-community: \f0c0; -$fa-var-compass: \f14e; -$fa-var-compress: \f066; -$fa-var-connectdevelop: \f20e; -$fa-var-contao: \f26d; -$fa-var-conway-glider: \f349; -$fa-var-conway-hacker: \f349; -$fa-var-copy: \f0c5; -$fa-var-copyright: \f1f9; -$fa-var-creative-commons: \f25e; -$fa-var-creative-commons-alt: \f33f; -$fa-var-credit-card: \f09d; -$fa-var-credit-card-alt: \f283; -$fa-var-crop: \f125; -$fa-var-crosshairs: \f05b; -$fa-var-csharp: \f34a; -$fa-var-css3: \f13c; -$fa-var-cube: \f1b2; -$fa-var-cubes: \f1b3; -$fa-var-cut: \f0c4; -$fa-var-cut-key: \f2f7; -$fa-var-cutlery: \f0f5; -$fa-var-dashboard: \f0e4; 
-$fa-var-dashcube: \f210; -$fa-var-database: \f1c0; -$fa-var-deaf: \f2a4; -$fa-var-deafness: \f2a4; -$fa-var-debian: \f2ff; -$fa-var-dedent: \f03b; -$fa-var-delicious: \f1a5; -$fa-var-desktop: \f108; -$fa-var-dev-to: \f316; -$fa-var-deviantart: \f1bd; -$fa-var-diamond: \f219; -$fa-var-diaspora: \f2e5; -$fa-var-digg: \f1a6; -$fa-var-digitalocean: \f31d; -$fa-var-discord: \f2ee; -$fa-var-discord-alt: \f2ef; -$fa-var-dogmazic: \f303; -$fa-var-dollar: \f155; -$fa-var-dot-circle-o: \f192; -$fa-var-download: \f019; -$fa-var-dribbble: \f17d; -$fa-var-drivers-license: \f2c2; -$fa-var-drivers-license-o: \f2c3; -$fa-var-dropbox: \f16b; -$fa-var-drupal: \f1a9; -$fa-var-edge: \f282; -$fa-var-edit: \f044; -$fa-var-eercast: \f2da; -$fa-var-eject: \f052; -$fa-var-ellipsis-h: \f141; -$fa-var-ellipsis-v: \f142; -$fa-var-email-bulk: \f34b; -$fa-var-email-bulk-o: \f34c; -$fa-var-emby: \f319; -$fa-var-empire: \f1d1; -$fa-var-envelope: \f0e0; -$fa-var-envelope-o: \f003; -$fa-var-envelope-open: \f2b6; -$fa-var-envelope-open-o: \f2b7; -$fa-var-envelope-square: \f199; -$fa-var-envira: \f299; -$fa-var-eraser: \f12d; -$fa-var-ethereum: \f2f3; -$fa-var-etsy: \f2d7; -$fa-var-eur: \f153; -$fa-var-euro: \f153; -$fa-var-exchange: \f0ec; -$fa-var-exclamation: \f12a; -$fa-var-exclamation-circle: \f06a; -$fa-var-exclamation-triangle: \f071; -$fa-var-expand: \f065; -$fa-var-expeditedssl: \f23e; -$fa-var-external-link: \f08e; -$fa-var-external-link-square: \f14c; -$fa-var-eye: \f06e; -$fa-var-eye-slash: \f070; -$fa-var-eyedropper: \f1fb; -$fa-var-f-droid: \f32a; -$fa-var-fa: \f2b4; -$fa-var-facebook: \f09a; -$fa-var-facebook-f: \f09a; -$fa-var-facebook-messenger: \f2fe; -$fa-var-facebook-official: \f230; -$fa-var-facebook-square: \f082; -$fa-var-fast-backward: \f049; -$fa-var-fast-forward: \f050; -$fa-var-fax: \f1ac; -$fa-var-feed: \f09e; -$fa-var-female: \f182; -$fa-var-ffmpeg: \f30f; -$fa-var-fighter-jet: \f0fb; -$fa-var-file: \f15b; -$fa-var-file-archive-o: \f1c6; -$fa-var-file-audio-o: \f1c7; -$fa-var-file-code-o: \f1c9; -$fa-var-file-epub: \f321; -$fa-var-file-excel-o: \f1c3; -$fa-var-file-image-o: \f1c5; -$fa-var-file-movie-o: \f1c8; -$fa-var-file-o: \f016; -$fa-var-file-pdf-o: \f1c1; -$fa-var-file-photo-o: \f1c5; -$fa-var-file-picture-o: \f1c5; -$fa-var-file-powerpoint-o: \f1c4; -$fa-var-file-sound-o: \f1c7; -$fa-var-file-text: \f15c; -$fa-var-file-text-o: \f0f6; -$fa-var-file-video-o: \f1c8; -$fa-var-file-word-o: \f1c2; -$fa-var-file-zip-o: \f1c6; -$fa-var-files-o: \f0c5; -$fa-var-film: \f008; -$fa-var-filter: \f0b0; -$fa-var-fire: \f06d; -$fa-var-fire-extinguisher: \f134; -$fa-var-firefox: \f269; -$fa-var-first-order: \f2b0; -$fa-var-flag: \f024; -$fa-var-flag-checkered: \f11e; -$fa-var-flag-o: \f11d; -$fa-var-flash: \f0e7; -$fa-var-flask: \f0c3; -$fa-var-flickr: \f16e; -$fa-var-floppy-o: \f0c7; -$fa-var-folder: \f07b; -$fa-var-folder-o: \f114; -$fa-var-folder-open: \f07c; -$fa-var-folder-open-o: \f115; -$fa-var-font: \f031; -$fa-var-font-awesome: \f2b4; -$fa-var-fonticons: \f280; -$fa-var-fork-awesome: \f2e3; -$fa-var-fork-circle: \f2e3; -$fa-var-fort-awesome: \f286; -$fa-var-forumbee: \f211; -$fa-var-forward: \f04e; -$fa-var-foursquare: \f180; -$fa-var-free-code-camp: \f2c5; -$fa-var-freedombox: \f2fd; -$fa-var-friendica: \f2e6; -$fa-var-frown-o: \f119; -$fa-var-funkwhale: \f339; -$fa-var-futbol-o: \f1e3; -$fa-var-gamepad: \f11b; -$fa-var-gavel: \f0e3; -$fa-var-gbp: \f154; -$fa-var-ge: \f1d1; -$fa-var-gear: \f013; -$fa-var-gears: \f085; -$fa-var-gem: \f219; -$fa-var-genderless: \f22d; -$fa-var-get-pocket: \f265; 
-$fa-var-gg: \f260; -$fa-var-gg-circle: \f261; -$fa-var-gift: \f06b; -$fa-var-gimp: \f31b; -$fa-var-git: \f1d3; -$fa-var-git-square: \f1d2; -$fa-var-gitea: \f31f; -$fa-var-github: \f09b; -$fa-var-github-alt: \f113; -$fa-var-github-square: \f092; -$fa-var-gitlab: \f296; -$fa-var-gittip: \f184; -$fa-var-glass: \f000; -$fa-var-glide: \f2a5; -$fa-var-glide-g: \f2a6; -$fa-var-globe: \f0ac; -$fa-var-globe-e: \f304; -$fa-var-globe-w: \f305; -$fa-var-gnu: \f34d; -$fa-var-gnu-social: \f2e7; -$fa-var-gnupg: \f30d; -$fa-var-google: \f1a0; -$fa-var-google-play: \f34e; -$fa-var-google-plus: \f0d5; -$fa-var-google-plus-circle: \f2b3; -$fa-var-google-plus-g: \f0d5; -$fa-var-google-plus-official: \f2b3; -$fa-var-google-plus-square: \f0d4; -$fa-var-google-wallet: \f1ee; -$fa-var-graduation-cap: \f19d; -$fa-var-gratipay: \f184; -$fa-var-grav: \f2d6; -$fa-var-group: \f0c0; -$fa-var-h-square: \f0fd; -$fa-var-hackaday: \f30a; -$fa-var-hacker-news: \f1d4; -$fa-var-hackster: \f326; -$fa-var-hal: \f333; -$fa-var-hand-grab-o: \f255; -$fa-var-hand-lizard-o: \f258; -$fa-var-hand-o-down: \f0a7; -$fa-var-hand-o-left: \f0a5; -$fa-var-hand-o-right: \f0a4; -$fa-var-hand-o-up: \f0a6; -$fa-var-hand-paper-o: \f256; -$fa-var-hand-peace-o: \f25b; -$fa-var-hand-pointer-o: \f25a; -$fa-var-hand-rock-o: \f255; -$fa-var-hand-scissors-o: \f257; -$fa-var-hand-spock-o: \f259; -$fa-var-hand-stop-o: \f256; -$fa-var-handshake-o: \f2b5; -$fa-var-hard-of-hearing: \f2a4; -$fa-var-hashnode: \f317; -$fa-var-hashtag: \f292; -$fa-var-hassio: \f350; -$fa-var-hdd-o: \f0a0; -$fa-var-header: \f1dc; -$fa-var-heading: \f1dc; -$fa-var-headphones: \f025; -$fa-var-heart: \f004; -$fa-var-heart-o: \f08a; -$fa-var-heartbeat: \f21e; -$fa-var-heroku: \f34f; -$fa-var-history: \f1da; -$fa-var-home: \f015; -$fa-var-home-assistant: \f350; -$fa-var-hospital-o: \f0f8; -$fa-var-hotel: \f236; -$fa-var-hourglass: \f254; -$fa-var-hourglass-1: \f251; -$fa-var-hourglass-2: \f252; -$fa-var-hourglass-3: \f253; -$fa-var-hourglass-end: \f253; -$fa-var-hourglass-half: \f252; -$fa-var-hourglass-o: \f250; -$fa-var-hourglass-start: \f251; -$fa-var-houzz: \f27c; -$fa-var-html5: \f13b; -$fa-var-hubzilla: \f2eb; -$fa-var-i-cursor: \f246; -$fa-var-id-badge: \f2c1; -$fa-var-id-card: \f2c2; -$fa-var-id-card-o: \f2c3; -$fa-var-ils: \f20b; -$fa-var-image: \f03e; -$fa-var-imdb: \f2d8; -$fa-var-inbox: \f01c; -$fa-var-indent: \f03c; -$fa-var-industry: \f275; -$fa-var-info: \f129; -$fa-var-info-circle: \f05a; -$fa-var-inkscape: \f312; -$fa-var-inr: \f156; -$fa-var-instagram: \f16d; -$fa-var-institution: \f19c; -$fa-var-internet-explorer: \f26b; -$fa-var-intersex: \f224; -$fa-var-ioxhost: \f208; -$fa-var-italic: \f033; -$fa-var-java: \f351; -$fa-var-jirafeau: \f318; -$fa-var-joomla: \f1aa; -$fa-var-joplin: \f310; -$fa-var-jpy: \f157; -$fa-var-jsfiddle: \f1cc; -$fa-var-julia: \f334; -$fa-var-jupyter: \f335; -$fa-var-key: \f084; -$fa-var-key-modern: \f2f7; -$fa-var-keybase: \f2f4; -$fa-var-keyboard-o: \f11c; -$fa-var-krw: \f159; -$fa-var-language: \f1ab; -$fa-var-laptop: \f109; -$fa-var-laravel: \f30b; -$fa-var-lastfm: \f202; -$fa-var-lastfm-square: \f203; -$fa-var-leaf: \f06c; -$fa-var-leanpub: \f212; -$fa-var-legal: \f0e3; -$fa-var-lemon-o: \f094; -$fa-var-level-down: \f149; -$fa-var-level-up: \f148; -$fa-var-liberapay: \f2e9; -$fa-var-liberapay-square: \f2e8; -$fa-var-life-bouy: \f1cd; -$fa-var-life-buoy: \f1cd; -$fa-var-life-ring: \f1cd; -$fa-var-life-saver: \f1cd; -$fa-var-lightbulb-o: \f0eb; -$fa-var-line-chart: \f201; -$fa-var-link: \f0c1; -$fa-var-linkedin: \f0e1; 
-$fa-var-linkedin-square: \f08c; -$fa-var-linode: \f2b8; -$fa-var-linux: \f17c; -$fa-var-list: \f03a; -$fa-var-list-alt: \f022; -$fa-var-list-ol: \f0cb; -$fa-var-list-ul: \f0ca; -$fa-var-location-arrow: \f124; -$fa-var-lock: \f023; -$fa-var-long-arrow-down: \f175; -$fa-var-long-arrow-left: \f177; -$fa-var-long-arrow-right: \f178; -$fa-var-long-arrow-up: \f176; -$fa-var-low-vision: \f2a8; -$fa-var-magic: \f0d0; -$fa-var-magnet: \f076; -$fa-var-mail-forward: \f064; -$fa-var-mail-reply: \f112; -$fa-var-mail-reply-all: \f122; -$fa-var-male: \f183; -$fa-var-map: \f279; -$fa-var-map-marker: \f041; -$fa-var-map-o: \f278; -$fa-var-map-pin: \f276; -$fa-var-map-signs: \f277; -$fa-var-mariadb: \f352; -$fa-var-markdown: \f353; -$fa-var-mars: \f222; -$fa-var-mars-double: \f227; -$fa-var-mars-stroke: \f229; -$fa-var-mars-stroke-h: \f22b; -$fa-var-mars-stroke-v: \f22a; -$fa-var-mastodon: \f2e1; -$fa-var-mastodon-alt: \f2e2; -$fa-var-mastodon-square: \f300; -$fa-var-matrix-org: \f313; -$fa-var-maxcdn: \f136; -$fa-var-meanpath: \f20c; -$fa-var-medium: \f23a; -$fa-var-medium-square: \f2f8; -$fa-var-medkit: \f0fa; -$fa-var-meetup: \f2e0; -$fa-var-meh-o: \f11a; -$fa-var-mercury: \f223; -$fa-var-microchip: \f2db; -$fa-var-microphone: \f130; -$fa-var-microphone-slash: \f131; -$fa-var-minus: \f068; -$fa-var-minus-circle: \f056; -$fa-var-minus-square: \f146; -$fa-var-minus-square-o: \f147; -$fa-var-mixcloud: \f289; -$fa-var-mobile: \f10b; -$fa-var-mobile-phone: \f10b; -$fa-var-modx: \f285; -$fa-var-money: \f0d6; -$fa-var-moon: \f328; -$fa-var-moon-o: \f186; -$fa-var-mortar-board: \f19d; -$fa-var-motorcycle: \f21c; -$fa-var-mouse-pointer: \f245; -$fa-var-music: \f001; -$fa-var-mysql: \f354; -$fa-var-navicon: \f0c9; -$fa-var-neuter: \f22c; -$fa-var-newspaper-o: \f1ea; -$fa-var-nextcloud: \f306; -$fa-var-nextcloud-square: \f307; -$fa-var-nodejs: \f308; -$fa-var-nordcast: \f355; -$fa-var-object-group: \f247; -$fa-var-object-ungroup: \f248; -$fa-var-odnoklassniki: \f263; -$fa-var-odnoklassniki-square: \f264; -$fa-var-open-collective: \f336; -$fa-var-opencart: \f23d; -$fa-var-openid: \f19b; -$fa-var-opera: \f26a; -$fa-var-optin-monster: \f23c; -$fa-var-orcid: \f337; -$fa-var-outdent: \f03b; -$fa-var-pagelines: \f18c; -$fa-var-paint-brush: \f1fc; -$fa-var-paper-plane: \f1d8; -$fa-var-paper-plane-o: \f1d9; -$fa-var-paperclip: \f0c6; -$fa-var-paragraph: \f1dd; -$fa-var-paste: \f0ea; -$fa-var-patreon: \f2f0; -$fa-var-pause: \f04c; -$fa-var-pause-circle: \f28b; -$fa-var-pause-circle-o: \f28c; -$fa-var-paw: \f1b0; -$fa-var-paypal: \f1ed; -$fa-var-peertube: \f2e4; -$fa-var-pencil: \f040; -$fa-var-pencil-square: \f14b; -$fa-var-pencil-square-o: \f044; -$fa-var-percent: \f295; -$fa-var-phone: \f095; -$fa-var-phone-square: \f098; -$fa-var-phone-volume: \f2a0; -$fa-var-photo: \f03e; -$fa-var-php: \f30e; -$fa-var-picture-o: \f03e; -$fa-var-pie-chart: \f200; -$fa-var-pinterest: \f0d2; -$fa-var-pinterest-p: \f231; -$fa-var-pinterest-square: \f0d3; -$fa-var-pixelfed: \f314; -$fa-var-plane: \f072; -$fa-var-play: \f04b; -$fa-var-play-circle: \f144; -$fa-var-play-circle-o: \f01d; -$fa-var-pleroma: \f324; -$fa-var-plug: \f1e6; -$fa-var-plume: \f356; -$fa-var-plus: \f067; -$fa-var-plus-circle: \f055; -$fa-var-plus-square: \f0fe; -$fa-var-plus-square-o: \f196; -$fa-var-podcast: \f2ce; -$fa-var-postgresql: \f357; -$fa-var-pound: \f154; -$fa-var-power-off: \f011; -$fa-var-print: \f02f; -$fa-var-product-hunt: \f288; -$fa-var-puzzle-piece: \f12e; -$fa-var-python: \f322; -$fa-var-qq: \f1d6; -$fa-var-qrcode: \f029; -$fa-var-question: \f128; 
-$fa-var-question-circle: \f059; -$fa-var-question-circle-o: \f29c; -$fa-var-quora: \f2c4; -$fa-var-quote-left: \f10d; -$fa-var-quote-right: \f10e; -$fa-var-ra: \f1d0; -$fa-var-random: \f074; -$fa-var-ravelry: \f2d9; -$fa-var-react: \f302; -$fa-var-rebel: \f1d0; -$fa-var-recycle: \f1b8; -$fa-var-reddit: \f1a1; -$fa-var-reddit-alien: \f281; -$fa-var-reddit-square: \f1a2; -$fa-var-refresh: \f021; -$fa-var-registered: \f25d; -$fa-var-remove: \f00d; -$fa-var-renren: \f18b; -$fa-var-reorder: \f0c9; -$fa-var-repeat: \f01e; -$fa-var-reply: \f112; -$fa-var-reply-all: \f122; -$fa-var-researchgate: \f338; -$fa-var-resistance: \f1d0; -$fa-var-retweet: \f079; -$fa-var-rmb: \f157; -$fa-var-road: \f018; -$fa-var-rocket: \f135; -$fa-var-rotate-left: \f0e2; -$fa-var-rotate-right: \f01e; -$fa-var-rouble: \f158; -$fa-var-rss: \f09e; -$fa-var-rss-square: \f143; -$fa-var-rub: \f158; -$fa-var-ruble: \f158; -$fa-var-rupee: \f156; -$fa-var-s15: \f2cd; -$fa-var-safari: \f267; -$fa-var-sass: \f358; -$fa-var-sass-alt: \f359; -$fa-var-save: \f0c7; -$fa-var-scissors: \f0c4; -$fa-var-scribd: \f28a; -$fa-var-scuttlebutt: \f2ea; -$fa-var-search: \f002; -$fa-var-search-minus: \f010; -$fa-var-search-plus: \f00e; -$fa-var-sellsy: \f213; -$fa-var-send: \f1d8; -$fa-var-send-o: \f1d9; -$fa-var-server: \f233; -$fa-var-shaarli: \f2f5; -$fa-var-shaarli-o: \f2f6; -$fa-var-share: \f064; -$fa-var-share-alt: \f1e0; -$fa-var-share-alt-square: \f1e1; -$fa-var-share-square: \f14d; -$fa-var-share-square-o: \f045; -$fa-var-shekel: \f20b; -$fa-var-sheqel: \f20b; -$fa-var-shield: \f132; -$fa-var-ship: \f21a; -$fa-var-shirtsinbulk: \f214; -$fa-var-shopping-bag: \f290; -$fa-var-shopping-basket: \f291; -$fa-var-shopping-cart: \f07a; -$fa-var-shower: \f2cc; -$fa-var-sign-in: \f090; -$fa-var-sign-language: \f2a7; -$fa-var-sign-out: \f08b; -$fa-var-signal: \f012; -$fa-var-signalapp: \f30c; -$fa-var-signing: \f2a7; -$fa-var-simplybuilt: \f215; -$fa-var-sitemap: \f0e8; -$fa-var-skate: \f35a; -$fa-var-sketchfab: \f35b; -$fa-var-skyatlas: \f216; -$fa-var-skype: \f17e; -$fa-var-slack: \f198; -$fa-var-sliders: \f1de; -$fa-var-slideshare: \f1e7; -$fa-var-smile-o: \f118; -$fa-var-snapchat: \f2ab; -$fa-var-snapchat-ghost: \f2ac; -$fa-var-snapchat-square: \f2ad; -$fa-var-snowdrift: \f2f1; -$fa-var-snowflake-o: \f2dc; -$fa-var-soccer-ball-o: \f1e3; -$fa-var-social-home: \f2ec; -$fa-var-sort: \f0dc; -$fa-var-sort-alpha-asc: \f15d; -$fa-var-sort-alpha-desc: \f15e; -$fa-var-sort-alpha-down: \f15d; -$fa-var-sort-alpha-up: \f15e; -$fa-var-sort-amount-asc: \f160; -$fa-var-sort-amount-desc: \f161; -$fa-var-sort-amount-down: \f160; -$fa-var-sort-amount-up: \f161; -$fa-var-sort-asc: \f0de; -$fa-var-sort-desc: \f0dd; -$fa-var-sort-down: \f0dd; -$fa-var-sort-numeric-asc: \f162; -$fa-var-sort-numeric-desc: \f163; -$fa-var-sort-numeric-down: \f162; -$fa-var-sort-numeric-up: \f163; -$fa-var-sort-up: \f0de; -$fa-var-soundcloud: \f1be; -$fa-var-space-shuttle: \f197; -$fa-var-spell-check: \f327; -$fa-var-spinner: \f110; -$fa-var-spoon: \f1b1; -$fa-var-spotify: \f1bc; -$fa-var-square: \f0c8; -$fa-var-square-o: \f096; -$fa-var-ssb: \f2ea; -$fa-var-stack-exchange: \f18d; -$fa-var-stack-overflow: \f16c; -$fa-var-star: \f005; -$fa-var-star-half: \f089; -$fa-var-star-half-empty: \f123; -$fa-var-star-half-full: \f123; -$fa-var-star-half-o: \f123; -$fa-var-star-o: \f006; -$fa-var-steam: \f1b6; -$fa-var-steam-square: \f1b7; -$fa-var-step-backward: \f048; -$fa-var-step-forward: \f051; -$fa-var-stethoscope: \f0f1; -$fa-var-sticky-note: \f249; -$fa-var-sticky-note-o: \f24a; 
-$fa-var-stop: \f04d; -$fa-var-stop-circle: \f28d; -$fa-var-stop-circle-o: \f28e; -$fa-var-street-view: \f21d; -$fa-var-strikethrough: \f0cc; -$fa-var-stumbleupon: \f1a4; -$fa-var-stumbleupon-circle: \f1a3; -$fa-var-subscript: \f12c; -$fa-var-subway: \f239; -$fa-var-suitcase: \f0f2; -$fa-var-sun: \f329; -$fa-var-sun-o: \f185; -$fa-var-superpowers: \f2dd; -$fa-var-superscript: \f12b; -$fa-var-support: \f1cd; -$fa-var-sync: \f021; -$fa-var-syncthing: \f311; -$fa-var-table: \f0ce; -$fa-var-tablet: \f10a; -$fa-var-tachometer: \f0e4; -$fa-var-tag: \f02b; -$fa-var-tags: \f02c; -$fa-var-tasks: \f0ae; -$fa-var-taxi: \f1ba; -$fa-var-telegram: \f2c6; -$fa-var-television: \f26c; -$fa-var-tencent-weibo: \f1d5; -$fa-var-terminal: \f120; -$fa-var-tex: \f35c; -$fa-var-text-height: \f034; -$fa-var-text-width: \f035; -$fa-var-textpattern: \f35d; -$fa-var-th: \f00a; -$fa-var-th-large: \f009; -$fa-var-th-list: \f00b; -$fa-var-themeisle: \f2b2; -$fa-var-thermometer: \f2c7; -$fa-var-thermometer-0: \f2cb; -$fa-var-thermometer-1: \f2ca; -$fa-var-thermometer-2: \f2c9; -$fa-var-thermometer-3: \f2c8; -$fa-var-thermometer-4: \f2c7; -$fa-var-thermometer-empty: \f2cb; -$fa-var-thermometer-full: \f2c7; -$fa-var-thermometer-half: \f2c9; -$fa-var-thermometer-quarter: \f2ca; -$fa-var-thermometer-three-quarters: \f2c8; -$fa-var-thumb-tack: \f08d; -$fa-var-thumbs-down: \f165; -$fa-var-thumbs-o-down: \f088; -$fa-var-thumbs-o-up: \f087; -$fa-var-thumbs-up: \f164; -$fa-var-ticket: \f145; -$fa-var-times: \f00d; -$fa-var-times-circle: \f057; -$fa-var-times-circle-o: \f05c; -$fa-var-times-rectangle: \f2d3; -$fa-var-times-rectangle-o: \f2d4; -$fa-var-tint: \f043; -$fa-var-tipeee: \f301; -$fa-var-toggle-down: \f150; -$fa-var-toggle-left: \f191; -$fa-var-toggle-off: \f204; -$fa-var-toggle-on: \f205; -$fa-var-toggle-right: \f152; -$fa-var-toggle-up: \f151; -$fa-var-tor-onion: \f32e; -$fa-var-trademark: \f25c; -$fa-var-train: \f238; -$fa-var-transgender: \f224; -$fa-var-transgender-alt: \f225; -$fa-var-trash: \f1f8; -$fa-var-trash-o: \f014; -$fa-var-tree: \f1bb; -$fa-var-trello: \f181; -$fa-var-tripadvisor: \f262; -$fa-var-trophy: \f091; -$fa-var-truck: \f0d1; -$fa-var-try: \f195; -$fa-var-tty: \f1e4; -$fa-var-tumblr: \f173; -$fa-var-tumblr-square: \f174; -$fa-var-turkish-lira: \f195; -$fa-var-tv: \f26c; -$fa-var-twitch: \f1e8; -$fa-var-twitter: \f099; -$fa-var-twitter-square: \f081; -$fa-var-umbrella: \f0e9; -$fa-var-underline: \f0cd; -$fa-var-undo: \f0e2; -$fa-var-unity: \f35e; -$fa-var-universal-access: \f29a; -$fa-var-university: \f19c; -$fa-var-unlink: \f127; -$fa-var-unlock: \f09c; -$fa-var-unlock-alt: \f13e; -$fa-var-unsorted: \f0dc; -$fa-var-unsplash: \f325; -$fa-var-upload: \f093; -$fa-var-usb: \f287; -$fa-var-usd: \f155; -$fa-var-user: \f007; -$fa-var-user-circle: \f2bd; -$fa-var-user-circle-o: \f2be; -$fa-var-user-md: \f0f0; -$fa-var-user-o: \f2c0; -$fa-var-user-plus: \f234; -$fa-var-user-secret: \f21b; -$fa-var-user-times: \f235; -$fa-var-users: \f0c0; -$fa-var-utensil-spoon: \f1b1; -$fa-var-utensils: \f0f5; -$fa-var-vcard: \f2bb; -$fa-var-vcard-o: \f2bc; -$fa-var-venus: \f221; -$fa-var-venus-double: \f226; -$fa-var-venus-mars: \f228; -$fa-var-viacoin: \f237; -$fa-var-viadeo: \f2a9; -$fa-var-viadeo-square: \f2aa; -$fa-var-video: \f03d; -$fa-var-video-camera: \f03d; -$fa-var-vimeo: \f27d; -$fa-var-vimeo-square: \f194; -$fa-var-vimeo-v: \f27d; -$fa-var-vine: \f1ca; -$fa-var-vk: \f189; -$fa-var-volume-control-phone: \f2a0; -$fa-var-volume-down: \f027; -$fa-var-volume-mute: \f32f; -$fa-var-volume-off: \f026; 
-$fa-var-volume-up: \f028; -$fa-var-warning: \f071; -$fa-var-wechat: \f1d7; -$fa-var-weibo: \f18a; -$fa-var-weixin: \f1d7; -$fa-var-whatsapp: \f232; -$fa-var-wheelchair: \f193; -$fa-var-wheelchair-alt: \f29b; -$fa-var-wifi: \f1eb; -$fa-var-wikidata: \f31a; -$fa-var-wikipedia-w: \f266; -$fa-var-window-close: \f2d3; -$fa-var-window-close-o: \f2d4; -$fa-var-window-maximize: \f2d0; -$fa-var-window-minimize: \f2d1; -$fa-var-window-restore: \f2d2; -$fa-var-windows: \f17a; -$fa-var-wire: \f32c; -$fa-var-won: \f159; -$fa-var-wordpress: \f19a; -$fa-var-wpbeginner: \f297; -$fa-var-wpexplorer: \f2de; -$fa-var-wpforms: \f298; -$fa-var-wrench: \f0ad; -$fa-var-xing: \f168; -$fa-var-xing-square: \f169; -$fa-var-xmpp: \f2f9; -$fa-var-y-combinator: \f23b; -$fa-var-y-combinator-square: \f1d4; -$fa-var-yahoo: \f19e; -$fa-var-yc: \f23b; -$fa-var-yc-square: \f1d4; -$fa-var-yelp: \f1e9; -$fa-var-yen: \f157; -$fa-var-yoast: \f2b1; -$fa-var-youtube: \f167; -$fa-var-youtube-play: \f16a; -$fa-var-youtube-square: \f166; -$fa-var-zotero: \f309; - diff --git a/themes/hugo-coder/assets/scss/fork-awesome/fork-awesome.scss b/themes/hugo-coder/assets/scss/fork-awesome/fork-awesome.scss deleted file mode 100644 index a24f03cc..00000000 --- a/themes/hugo-coder/assets/scss/fork-awesome/fork-awesome.scss +++ /dev/null @@ -1,28 +0,0 @@ -/*! -Fork Awesome 1.2.0 -License - https://forkaweso.me/Fork-Awesome/license - -Copyright 2018 Dave Gandy & Fork Awesome - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - - -@import "variables"; -@import "mixins"; -@import "functions"; -@import "path"; -@import "core"; -@import "larger"; -@import "fixed-width"; -@import "list"; -@import "bordered-pulled"; -@import "animated"; -@import "rotated-flipped"; -@import "stacked"; -@import "icons"; -@import "screen-reader"; diff --git a/themes/hugo-coder/docs/analytics.md b/themes/hugo-coder/docs/analytics.md deleted file mode 100644 index 45b83962..00000000 --- a/themes/hugo-coder/docs/analytics.md +++ /dev/null @@ -1,19 +0,0 @@ -# Enable Web Analytics - -## Supported Providers - -*in alphabetic order* - -* [Application Insights](https://azure.com/) - [documentation](/docs/analytics/applicationinsights.md) -* [Baidu Analytics](https://tongji.baidu.com/) - [documentation](/docs/analytics/baidu.md) -* [Cloudflare](https://www.cloudflare.com/analytics/) - [documentation](/docs/analytics/cloudflare.md) -* [Google Analytics](https://developers.google.com/analytics) - [documentation](/docs/analytics/googleanalytics.md) -* [Google Tag Manager](https://developers.google.com/tag-manager) - [documentation](/docs/analytics/googletagmanager.md) -* [Fathom Analytics](https://usefathom.com/) - [documentation](/docs/analytics/fathom.md) -* [Goat Counter](https://www.goatcounter.com/) - [documentation](/docs/analytics/goatcounter.md) -* [Matomo](https://matomo.org/) - [documentation](/docs/analytics/matomo.md) -* [Micro Analytics](https://microanalytics.io/) - [documentation](/docs/analytics/microanalytics.md) -* [Pirsch](https://pirsch.io/) - [documentation](/docs/analytics/pirsch.md) -* [Plausible Analytics](https://plausible.io/) - [documentation](/docs/analytics/plausible.md) -* [Umami](https://umami.is/) - [documentation](/docs/analytics/umami.md) -* [Wide Angle Analytics](https://wideangle.co/) - [documentation](/docs/analytics/wideangle.md) diff --git a/themes/hugo-coder/docs/analytics/applicationinsights.md b/themes/hugo-coder/docs/analytics/applicationinsights.md deleted file mode 100644 index 859feaa8..00000000 --- a/themes/hugo-coder/docs/analytics/applicationinsights.md +++ /dev/null @@ -1,6 +0,0 @@ -# Application Insights - -```toml -[params.applicationInsights] - connectionString = "connectionstring" # https://docs.microsoft.com/en-us/azure/azure-monitor/app/sdk-connection-string -``` diff --git a/themes/hugo-coder/docs/analytics/baidu.md b/themes/hugo-coder/docs/analytics/baidu.md deleted file mode 100644 index e71a3bec..00000000 --- a/themes/hugo-coder/docs/analytics/baidu.md +++ /dev/null @@ -1,25 +0,0 @@ -# Baidu - -To use Baidu analytics in this theme, navigate to the setting page of your -site at . 
You will get a piece of JS code provided -as follows: - -```js - -``` - -Then you can use it by filling in your site token `your_token_at_here` in the -config file: - -```toml -[params.baidu] - token = "your_token_at_here" -``` diff --git a/themes/hugo-coder/docs/analytics/cloudflare.md b/themes/hugo-coder/docs/analytics/cloudflare.md deleted file mode 100644 index 84f11712..00000000 --- a/themes/hugo-coder/docs/analytics/cloudflare.md +++ /dev/null @@ -1,6 +0,0 @@ -# Cloudflare - -```toml -[params.cloudflare] - token = "token" -``` diff --git a/themes/hugo-coder/docs/analytics/fathom.md b/themes/hugo-coder/docs/analytics/fathom.md deleted file mode 100644 index d2a8c190..00000000 --- a/themes/hugo-coder/docs/analytics/fathom.md +++ /dev/null @@ -1,7 +0,0 @@ -# Fathom Analytics - -```toml -[params.fathomAnalytics] - siteID = "ABCDE" - serverURL = "cdn.usefathom.com" # (optional) Replace if you use a custom domain -``` \ No newline at end of file diff --git a/themes/hugo-coder/docs/analytics/goatcounter.md b/themes/hugo-coder/docs/analytics/goatcounter.md deleted file mode 100644 index b217b5a7..00000000 --- a/themes/hugo-coder/docs/analytics/goatcounter.md +++ /dev/null @@ -1,6 +0,0 @@ -# Goat Counter - -```toml -[params.goatCounter] - code = "code" # You will access your account at https://[code].goatcounter.com -``` diff --git a/themes/hugo-coder/docs/analytics/googleanalytics.md b/themes/hugo-coder/docs/analytics/googleanalytics.md deleted file mode 100644 index ae6c838d..00000000 --- a/themes/hugo-coder/docs/analytics/googleanalytics.md +++ /dev/null @@ -1,3 +0,0 @@ -# Google Analytics - -Follow [these steps](https://gohugo.io/templates/internal/#configure-google-analytics). \ No newline at end of file diff --git a/themes/hugo-coder/docs/analytics/googletagmanager.md b/themes/hugo-coder/docs/analytics/googletagmanager.md deleted file mode 100644 index 94664e1c..00000000 --- a/themes/hugo-coder/docs/analytics/googletagmanager.md +++ /dev/null @@ -1,6 +0,0 @@ -# Google Tag Manager - -```toml -[params.googleTagManager] - id = "gid" -``` diff --git a/themes/hugo-coder/docs/analytics/matomo.md b/themes/hugo-coder/docs/analytics/matomo.md deleted file mode 100644 index f751330c..00000000 --- a/themes/hugo-coder/docs/analytics/matomo.md +++ /dev/null @@ -1,7 +0,0 @@ -# Matomo - -```toml -[params.matomo] - siteID = "ABCDE" - serverURL = "analytics.example.com" -``` diff --git a/themes/hugo-coder/docs/analytics/microanalytics.md b/themes/hugo-coder/docs/analytics/microanalytics.md deleted file mode 100644 index d1be070f..00000000 --- a/themes/hugo-coder/docs/analytics/microanalytics.md +++ /dev/null @@ -1,7 +0,0 @@ -# microanalytics.io - -```toml -[params.microAnalytics] - id = "ABCDE" - dnt = "false" # respect DNT tracker, "true" by default -``` diff --git a/themes/hugo-coder/docs/analytics/pirsch.md b/themes/hugo-coder/docs/analytics/pirsch.md deleted file mode 100644 index 8cfceabd..00000000 --- a/themes/hugo-coder/docs/analytics/pirsch.md +++ /dev/null @@ -1,6 +0,0 @@ -# Pirsch - -```toml -[params.pirsch] - code = "ABCDE" -``` \ No newline at end of file diff --git a/themes/hugo-coder/docs/analytics/plausible.md b/themes/hugo-coder/docs/analytics/plausible.md deleted file mode 100644 index 3952da9e..00000000 --- a/themes/hugo-coder/docs/analytics/plausible.md +++ /dev/null @@ -1,7 +0,0 @@ -# Plausible Analytics - -```toml -[params.plausibleAnalytics] - domain = "example.com" - serverURL = "plausible.io" # (optional) Replace if you use a custom domain -``` diff --git 
a/themes/hugo-coder/docs/analytics/umami.md b/themes/hugo-coder/docs/analytics/umami.md deleted file mode 100644 index 9cf38c2c..00000000 --- a/themes/hugo-coder/docs/analytics/umami.md +++ /dev/null @@ -1,7 +0,0 @@ -# Umami - -```toml -[params.umami] - siteID = "ABCDE" - serverURL = "analytics.example.com" -``` diff --git a/themes/hugo-coder/docs/analytics/wideangle.md b/themes/hugo-coder/docs/analytics/wideangle.md deleted file mode 100644 index e4963c3f..00000000 --- a/themes/hugo-coder/docs/analytics/wideangle.md +++ /dev/null @@ -1,25 +0,0 @@ -# Wide Angle Analytics - -Wide Angle Analytics is strictly **GDPR** compliant, privacy-first web analytics. Head to [wideangle.co](https://wideangle.co/) and create your free trial account now. - -## Configuration - -The built-in template supports the following configuration options: - -param | description | required | default -------|-------------|----------|-------- -siteID| A site ID from [Wide Angle site](https://wideangle.co/documentation/web-analytics-for-your-website)| :white_check_mark: | n/a -serverURL | [Domain](https://wideangle.co/documentation/track-with-custom-domain) with which the WAA is configured | :x: | stats.wideangle.co -fingerprint | Enable advanced session fingerprinting; requires user consent | :x: | false -supressDNT | Ignore Do-Not-Track setting in the browser | :x: | false - - -## Example - -```toml -[params.wideAngleAnalytics] - siteID = "1D4EG3B9ACA03F4243" - serverURL = "fyi.wideangle.co" - fingerprint = "false" - supressDNT = "false" -``` diff --git a/themes/hugo-coder/docs/comment-system.md b/themes/hugo-coder/docs/comment-system.md deleted file mode 100644 index 00d7bdd4..00000000 --- a/themes/hugo-coder/docs/comment-system.md +++ /dev/null @@ -1 +0,0 @@ -WIP diff --git a/themes/hugo-coder/docs/configurations.md b/themes/hugo-coder/docs/configurations.md deleted file mode 100644 index 58f0a5a7..00000000 --- a/themes/hugo-coder/docs/configurations.md +++ /dev/null @@ -1,334 +0,0 @@ -# Configurations - -* [About Hugo Configurations](#about-hugo-configurations) - * [Analytics](/docs/analytics.md) - * [Commenting Systems](#commenting-systems) - * [Disqus](#disqus) - * [Commento](#commento) - * [Utterances](#utterances) - * [Giscus](#giscus) -* [Syntax Highlight](#syntax-highlight) -* [Theme Parameters](#theme-parameters) - * [Social Icons Configuration](#social-icons-configuration) - * [Menu Items Configurations](#menu-items-configurations) - * [CSP](#csp) -* [Complete Example](#complete-example) -* [Front Matter](#front-matter) - * [Posts](#posts) - -## About Hugo Configurations - -This theme supports: - -* [Analytics](/docs/analytics.md) - -* Commenting Systems - * [Disqus](https://disqus.com/) - * [Commento](https://commento.io/) - * [Utterances](https://utteranc.es/) - * [Giscus](https://giscus.app/) - * [Telegram](https://comments.app/) - -### Commenting Systems - -Comments are displayed within post pages, but can be disabled with `disableComments` front-matter. - -#### Disqus - -Follow [these steps](https://gohugo.io/content-management/comments/#configure-disqus). 
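For quick reference, Disqus needs nothing beyond the site-level shortname in the Hugo configuration; a minimal sketch, where `yourdiscussshortname` is a placeholder matching the one used in the complete example further below:

```toml
# config.toml -- "yourdiscussshortname" is a placeholder; use the shortname registered with Disqus
disqusShortname = "yourdiscussshortname"
```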
- -#### Commento - -```toml -[params] - commentoURL = "https://cdn.commento.io" # Replace if you use a custom domain -``` - -#### Utterances - -```toml -[params.utterances] - repo = "" # https://utteranc.es/#heading-repository - issueTerm = "" # https://utteranc.es/#heading-mapping - label = "" # https://utteranc.es/#heading-issue-label - theme = "" # https://utteranc.es/#heading-theme -``` - -#### Giscus - -```toml -[params.giscus] # https://giscus.app - repo = "" - repoID = "" - category = "" - categoryID = "" - mapping = "" - term = "" - strict = "" - reactionsEnabled = "" - emitMetadata = "" - inputPosition = "" - theme = "" - lang = "" - loading = "" -``` - -#### Telegram - -```toml -[params.telegram] # https://comments.app/ - siteID = "" - limit = "" - height = "" - color = "" - dislikes = "" - outlined = "" - colorful = "" - dark = "" -``` - -## Syntax Highlight - -The theme uses the Goldmark syntax highlight system. GitHub light and dark are set as the default styles. To choose a different style, make sure `noClasses` is not set to false (default is true) and add to your `config.toml`: - -``` -[markup.highlight] -style = "monokai" -``` - -All `style` are available [here](https://xyproto.github.io/splash/docs/all.html). - -Alternatively, it is possible to use custom styles with generated CSS files. See [here](https://gohugo.io/content-management/syntax-highlighting/#generate-syntax-highlighter-css). - -## Theme Parameters - -These are all the parameters used by `hugo-coder` theme. - -| Name | Type | Required | Description | Default | Example | -| ----------------------------- | ------ | -------- | ------------------------------------------------ | -------------------------------- | ------------------------------------------------ | -| author | string | Yes | Author name. | | `"John Doe"` | -| info | string | Yes | An headline, job title or similar. | | `"Full Stack Developer"` | -| description | string | Yes | Description of the site. | | `"John Doe's personal website"` | -| keywords | string | Yes | Site keywords. | | `"blog,developer,personal"` | -| avatarURL | string | No | Photo of the author. | | `"images/avatar.jpg"` | -| gravatar | string | No | Gravatar photo of the author | | `"john.doe@example.com"` | -| faviconSVG | string | No | Custom path to a SCG favicon. | `"/img/favicon.svg"` | `"/img/favicon.svg"` | -| favicon_32 | string | No | Custom path to a 32x32 favicon. | `"/img/favicon-32x32.png"` | `"/img/favicon-32x32.png"` | -| favicon_16 | string | No | Custom path to a 16x16 favicon. | `"/img/favicon-16x16.png"` | `"/img/favicon-16x16.png"` | -| touchIcon | string | No | Custom path to a touch-icon | `"/images/apple-touch-icon.png"` | `"/images/apple-touch-icon.png"` | -| since | string | No | Date shown in the footer before now year | | `"2020"` | -| maxSeeAlsoItems | number | No | Series see also post count | `5` | `10` | -| commit | string | No | Show the last git commit in the footer | | `"https://github.com/luizdepra/hugo-coder/tree/"`| -| rtl | bool | No | Enable the Right To Left mode. | `false` | `true` or `false` | -| math | bool | No | Enable MathJax Module and add JS into your site. | `false` | `true` or `false` | -| katex | bool | No | Enable katex for all content types. 
| `false` | `true` or `false` | -| colorScheme | string | No | Specify light/dark colorscheme | `"auto"` | `"auto"` or `"light"` or `"dark"` | -| hideColorSchemeToggle | bool | No | If true, hides the color sheme toggle | `false` | `true` or `false` | -| customCSS | list | No | Add extra CSS files to the website. | [] | `["css/extra-style.css"]` | -| customSCSS | list | No | Add extra SCSS files to the website. | [] | `["scss/extra-style.scss"]` | -| customJS | list | No | Add extra JS files to the website. | [] | `["js/extra-script.js"]` | -| customRemoteJS | list | No | Add extra remote JS files to the website. | [] | `["https://www.example.com/file.js"]` | -| enableTwemoji | bool | No | Adds support for Twemoji | `false` | `true` or `false` | - -### Social Icons Configuration - -Social Icons are optional. To use them you will need to set at least all the following required parameters for each icon. - -| Configuration | Type | Required | Description | Example | -| -------------- | ------ | -------- | ---------------------------------------- | ------------------------------- | -| name | string | Yes | Icon name. | `"Github"` | -| icon | string | Yes | ForkAwesome icon classes. | `"fa fa-github"` | -| weight | int | Yes | Icon order. | `1` | -| url | string | Yes | URL to redirect. | `"https://github.com/johndoe/"` | - -An example: - -```toml -[[params.social]] - name = "Github" - icon = "fa fa-github fa-2x" - weight = 1 - url = "https://github.com/johndoe/" -[[params.social]] - name = "Gitlab" - icon = "fa fa-gitlab fa-2x" - weight = 2 - url = "https://gitlab.com/johndoe/" -[[params.social]] - name = "Twitter" - icon = "fa fa-twitter fa-2x" - weight = 3 - url = "https://twitter.com/johndoe/" -``` - -### Menu Items Configurations - -Menu Items are optional. To use them you will need to set all the following required parameters for each icon. - -| Configuration | Type | Required | Description | Example | -| -------------- | ------ | -------- | ---------------------------------------- | ------------------------------- | -| name | string | Yes | Menu Item name. | `"Posts"` | -| weight | int | Yes | Menu Item order. | `1` | -| url | string | Yes | URL to redirect. | `"/posts/"` | -| target | string | No | URL target attribute. | `"_blank"` | -| rel | string | No | URL rel attribute. | `"alternate"` | -| type | string | No | URL type attribute. | `"application/rss+xml"` | - -An example: - -```toml -[[menu.main]] - name = "Blog" - weight = 1 - url = "posts/" -[[menu.main]] - name = "About" - weight = 2 - url = "about/" -``` - -### CSP - -CSP stands for [Content Security Policy](https://developers.google.com/web/fundamentals/security/csp). These configurations are optional. To use them you will need to set all the following required parameters. See [here](https://developers.google.com/web/fundamentals/security/csp#policy_applies_to_a_wide_variety_of_resources) for reference. 
- -| Configuration | Type | Required | Description | Example | -| -------------- | ----------- | -------- | ----------- | ------------------------------- | -| childsrc | string list | Yes | | `["'self'"]` | -| fontsrc | string list | Yes | | `["'self'"]` | -| formaction | string list | Yes | | `["'self'"]` | -| framesrc | string list | Yes | | `["'self'"]` | -| imgsrc | string list | Yes | | `["'self'"]` | -| objectsrc | string list | Yes | | `["'self'"]` | -| stylesrc | string list | Yes | | `["'self'"]` | -| scriptsrc | string list | Yes | | `["'self'"]` | -| prefetchsrc | string list | Yes | | `["'self'"]` | - -An example: - -```toml -[params.csp] - childsrc = ["'self'"] - fontsrc = [ - "'self'", - "https://fonts.gstatic.com", - "https://cdn.jsdelivr.net/" - ] - formaction = ["'self'"] - framesrc = ["'self'"] - imgsrc = ["'self'"] - objectsrc = ["'none'"] - stylesrc = [ - "'self'", - "'unsafe-inline'", - "https://fonts.googleapis.com/", - "https://cdn.jsdelivr.net/" - ] - scriptsrc = [ - "'self'", - "'unsafe-inline'", - "https://www.google-analytics.com" - ] - prefetchsrc = ["'self'"] -``` - -## Complete Example - -This is a complete configuration example with some recommended values. - -```toml -baseurl = "http://www.example.com" -title = "johndoe" -theme = "hugo-coder" -languagecode = "en" -defaultcontentlanguage = "en" - -paginate = 20 - -disqusShortname = "yourdiscussshortname" - -[markup.highlight] -style = "github-dark" - -[params] - author = "John Doe" - info = "Full Stack DevOps and Magician" - description = "John Doe's personal website" - keywords = "blog,developer,personal" - avatarurl = "images/avatar.jpg" - #gravatar = "john.doe@example.com" - - faviconSVG = "/img/favicon.svg" - favicon_32 = "/img/favicon-32x32.png" - favicon_16 = "/img/favicon-16x16.png" - - since = 2019 - - enableTwemoji = true - - colorScheme = "auto" - hidecolorschemetoggle = false - - # customCSS = ["css/custom.css"] - # customSCSS = ["scss/custom.scss"] - # customJS = ["js/custom.js"] - -[taxonomies] - category = "categories" - series = "series" - tag = "tags" - author = "authors" - -# Social links -[[params.social]] - name = "Github" - icon = "fa fa-github fa-2x" - weight = 1 - url = "https://github.com/johndoe/" -[[params.social]] - name = "Gitlab" - icon = "fa fa-gitlab fa-2x" - weight = 2 - url = "https://gitlab.com/johndoe/" -[[params.social]] - name = "Twitter" - icon = "fa fa-twitter fa-2x" - weight = 3 - url = "https://twitter.com/johndoe/" - -# Menu links -[[menu.main]] - name = "Blog" - weight = 1 - url = "posts/" -[[menu.main]] - name = "About" - weight = 2 - url = "about/" -``` - -## Front Matter - -Hugo documentation: https://gohugo.io/content-management/front-matter - -This theme includes one content type: - -* [Posts](#posts), useful to display blog posts - -### Posts - -These are the front matter variables used by `hugo-coder` theme. - -| Name | Type | Required | Description | Default | Example | -| ---------------- | ------ | -------- | -------------------------------------------------- | ------- | ------------------------------------------------------------------------------- | -| tags | list | No | Add tag(s) to this post. | | `["Hugo", "Go"]` | -| categories | list | No | Add categorie(s) to this post. | | `["Hugo", "Go"]` | -| series | list | No | Add series to this post (used by OpenGraph). | | `["Theme Demo"]` | -| author | list | No | Add author to this post. | | `["John Doe"]` | -| externalLink | string | No | Link to an external post. 
| | `"https://github.com/luizdepra/hugo-coder/wiki"` | -| featuredImage | string | No | Link/path to add an image below post metadata. | | `"https://github.com/luizdepra/hugo-coder/blob/master/images/screenshot.png"` | -| math | bool | No | If true, MathJax is enabled only for this post. | `false` | `true` or `false` | -| katex | bool | No | If true, katex is enabled only for this post. | `false` | `true` or `false` | -| disableComments | bool | No | If true, comments are disabled. | `false` | `true` or `false` | -| canonicalUrl | string | No | Link to override in | `false` | `"https://my-company.com/blog/my-blog-post-that-I-repost-without-hurtiong-seo"` | - -> "tags", "categories", "series" and "authors" are taxonomies defined in the `config.toml` file. diff --git a/themes/hugo-coder/docs/contributing.md b/themes/hugo-coder/docs/contributing.md deleted file mode 100644 index 00d7bdd4..00000000 --- a/themes/hugo-coder/docs/contributing.md +++ /dev/null @@ -1 +0,0 @@ -WIP diff --git a/themes/hugo-coder/docs/faq.md b/themes/hugo-coder/docs/faq.md deleted file mode 100644 index 00d7bdd4..00000000 --- a/themes/hugo-coder/docs/faq.md +++ /dev/null @@ -1 +0,0 @@ -WIP diff --git a/themes/hugo-coder/docs/home.md b/themes/hugo-coder/docs/home.md deleted file mode 100644 index 9267ad0e..00000000 --- a/themes/hugo-coder/docs/home.md +++ /dev/null @@ -1,19 +0,0 @@ -# Welcome to the hugo-coder docs! - -## Basic Usage - -* [Quick Start](quick-start.md) -* [Configurations](configurations.md) -* [FAQ](faq.md) - -## Extra Guides - -* [Multilingual Mode](multilingual-mode.md) -* [Comment System](comment-system.md) -* [Analytics](analytics.md) - -## Maintainers & Developers - -* [Contributing](contributing.md) - - diff --git a/themes/hugo-coder/docs/multilingual-mode.md b/themes/hugo-coder/docs/multilingual-mode.md deleted file mode 100644 index 8384e6d5..00000000 --- a/themes/hugo-coder/docs/multilingual-mode.md +++ /dev/null @@ -1,73 +0,0 @@ -# Multilingual-Mode - -* [Available Languages](#available-languages) -* [Configure Languages](#configure-languages) -* [Translation File Example](#translation-file-example) - -## Available Languages - -This theme supports the following languages: - -- Arabic -- Bengali -- Czech -- German -- English -- Spanish -- Finnish -- French -- Hebrew -- Hindi -- Hungarian -- Italian -- Japanese -- Malay -- Dutch -- Polish -- Brazilian Portuguese -- Romanian -- Russian -- Swedish -- Slovak -- Turkish -- Simplified Chinese -- Taiwan Chinese - -## Configure languages - -Go to [this Hugo documentation page](https://gohugo.io/content-management/multilingual/#configure-languages) to configure one or multiple languages for your website. - -## Translation File Example - -```toml -[category] -other = "category" - -[tag] -other = "tag" - -[series] -other = "series" - -[author] -other = "author" - -[reading_time] -one = "One-minute read" -other = "{{ .Count }}-minute read" - -[page_not_found] -other = "Page Not Found" - -[page_does_not_exist] -other = "Sorry, this page does not exist." - -[head_back] -other = "You can head back to homepage." - -[powered_by] -other = "Powered by" - -[see_also] -other = "See also in" -``` diff --git a/themes/hugo-coder/docs/quick-start.md b/themes/hugo-coder/docs/quick-start.md deleted file mode 100644 index 22cf8dd2..00000000 --- a/themes/hugo-coder/docs/quick-start.md +++ /dev/null @@ -1,23 +0,0 @@ -# Quick Start - -To start using `hugo-coder`: - -1. 
Add the repository into your Hugo Project repository as a submodule, `git submodule add https://github.com/luizdepra/hugo-coder.git themes/hugo-coder`. -2. Configure your `config.toml`. You can use [this minimal configuration](https://github.com/luizdepra/hugo-coder/blob/main/docs/configurations.md#complete-example) as a base. The [`config.toml`](https://github.com/luizdepra/hugo-coder/blob/master/exampleSite/config.toml) inside the [exampleSite](https://github.com/luizdepra/hugo-coder/tree/master/exampleSite) directory is also a good reference. -3. Build your site with `hugo serve` and see the result at `http://localhost:1313/`. - -If you just want to test this theme, go to [this page](https://themes.gohugo.io/theme/hugo-coder/). - -You can also clone or download it, then run these commands: - -``` -git clone https://github.com/luizdepra/hugo-coder.git - -cd hugo-coder - -make demo -``` - -You'll see the result at [http://localhost:1313/](http://localhost:1313/). - -> These last two methods don't use the same content directory; the first one leads to 404 pages for some pages. More info [here](https://github.com/gohugoio/hugoThemes#adding-a-theme-to-the-list). diff --git a/themes/hugo-coder/go.mod b/themes/hugo-coder/go.mod deleted file mode 100644 index 9b2ac45e..00000000 --- a/themes/hugo-coder/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/luizdepra/hugo-coder - -go 1.16 diff --git a/themes/hugo-coder/i18n/ar.toml b/themes/hugo-coder/i18n/ar.toml deleted file mode 100644 index 9de2101b..00000000 --- a/themes/hugo-coder/i18n/ar.toml +++ /dev/null @@ -1,63 +0,0 @@ -[categories] -one = "فئة" -other = "categories" - -[tags] -one = "وَسم" -other = "tags" - -[series] -one = "سلسلة" -other = "series" - -[authors] -one = "الكاتب" -other = "authors" - -[posts] -other = "المنشورات" - -[reading_time] -other = "تستغرق {{ .Count }} د" - -[page_not_found] -other = "الصفحة غير موجودة" - -[page_does_not_exist] -other = "هذه الصفحة غير موجودة" - -[head_back] -other = "بإمكانك العودة إلى homepage." 
- -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "بواسطة" - -[see_also] -other = "انظر أيضاً" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/bn.toml b/themes/hugo-coder/i18n/bn.toml deleted file mode 100644 index d5b2dd84..00000000 --- a/themes/hugo-coder/i18n/bn.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "বিভাগ" -other = "বিভাগসমূহ" - -[tags] -one = "ট্যাগ" -other = "ট্যাগসমূহ" - -[series] -one = "সিরিজ" -other = "সিরিজ" - -[authors] -one = "লেখক" -other = "লেখকসমূহ" - -[posts] -other = "সব পোস্ট" - -[reading_time] -one = "পড়তে এক মিনিট লাগবে" -other = "পড়তে {{ .Count }} মিনিট লাগবে" - -[page_not_found] -other = "পাতাটি পাওয়া যায় নি" - -[page_does_not_exist] -other = "দুঃখিত, কাঙ্ক্ষিত পাতাটির অস্তিত্ব নেই" - -[head_back] -other = "আপনি নীড়পাতায় ফিরে যেতে পারেন" - -[licensed_under] -other = "লাইসেন্স:" - -[powered_by] -other = "চালনা:" - -[see_also] -other = "আরও দেখুন" - -[note] -other = "নোট" - -[tip] -other = "টিপ" - -[example] -other = "উদাহরণ" - -[question] -other = "প্রশ্ন" - -[info] -other = "তথ্য" - -[warning] -other = "নির্দেশ" - -[error] -other = "ত্রুটি" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/cs.toml b/themes/hugo-coder/i18n/cs.toml deleted file mode 100644 index 93047862..00000000 --- a/themes/hugo-coder/i18n/cs.toml +++ /dev/null @@ -1,63 +0,0 @@ -[categories] -one = "kategorie" -other = "categories" - -[tags] -one = "tag" -other = "tags" - -[series] -one = "série" -other = "series" - -[authors] -one = "author" -other = "authors" - -[posts] -other = "posts" - -[reading_time] -other = "Délka čtení: {{ .Count }}" - -[page_not_found] -other = "Stránka nenalezena" - -[page_does_not_exist] -other = "Omlouváme se, stránka nebyla nalezena." - -[head_back] -other = "Můžete se vrátit na homepage." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Stránka používá" - -[see_also] -other = "Podívejte se také na" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/de.toml b/themes/hugo-coder/i18n/de.toml deleted file mode 100644 index 521fe0fc..00000000 --- a/themes/hugo-coder/i18n/de.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "Kategorie" -other = "Kategorien" - -[tags] -one = "Tag" -other = "Tags" - -[series] -one = "Serie" -other = "Serien" - -[authors] -one = "Autor" -other = "Autoren" - -[posts] -other = "Beiträge" - -[reading_time] -one = "Eine Minute Lesezeit" -other = "{{ .Count }} Minuten Lesezeit" - -[page_not_found] -other = "Seite nicht gefunden" - -[page_does_not_exist] -other = "Tut mir Leid, die Seite existiert leider nicht." - -[head_back] -other = "Du kannst hier zurück zur Startseite." 
- -[licensed_under] -other = "Lizenziert unter" - -[powered_by] -other = "Gestaltet mit" - -[see_also] -other = "Siehe auch in" - -[note] -other = "Bemerkung" - -[tip] -other = "Tipp" - -[example] -other = "Beispiel" - -[question] -other = "Frage" - -[info] -other = "Info" - -[warning] -other = "Warnung" - -[error] -other = "Fehler" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/en.toml b/themes/hugo-coder/i18n/en.toml deleted file mode 100644 index 987586d2..00000000 --- a/themes/hugo-coder/i18n/en.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "category" -other = "categories" - -[tags] -one = "tag" -other = "tags" - -[series] -one = "series" -other = "series" - -[authors] -one = "author" -other = "authors" - -[posts] -other = "posts" - -[reading_time] -one = "One-minute read" -other = "{{ .Count }}-minute read" - -[page_not_found] -other = "Page Not Found" - -[page_does_not_exist] -other = "Sorry, this page does not exist." - -[head_back] -other = "You can head back to the homepage." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Powered by" - -[see_also] -other = "See also in" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/es.toml b/themes/hugo-coder/i18n/es.toml deleted file mode 100644 index 8300621a..00000000 --- a/themes/hugo-coder/i18n/es.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "categoría" -other = "categorías" - -[tags] -one = "etiqueta" -other = "etiquetas" - -[series] -one = "serie" -other = "series" - -[authors] -one = "autor" -other = "autores" - -[posts] -other = "publicaciones" - -[reading_time] -one = "Un minuto de lectura" -other = "{{ .Count }} minutos de lectura." - -[page_not_found] -other = "Página no encontrada." - -[page_does_not_exist] -other = "Disculpa, la página no existe." - -[head_back] -other = "Puedes regresar a la página inicial." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Desarrollado por" - -[see_also] -other = "También ver en" - -[note] -other = "nota" - -[tip] -other = "consejo" - -[example] -other = "ejemplo" - -[question] -other = "pregunta" - -[info] -other = "información" - -[warning] -other = "advertencia" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/fi.toml b/themes/hugo-coder/i18n/fi.toml deleted file mode 100644 index 24856162..00000000 --- a/themes/hugo-coder/i18n/fi.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "kategoria" -other = "categories" - -[tags] -one = "merkki" -other = "tags" - -[series] -one = "sarja" -other = "series" - -[authors] -one = "Kirjoittaja" -other = "authors" - -[posts] -other = "Artikkelit" - -[reading_time] -one = "Yksi lukuminuutti" -other = "{{ .Count }} lukuminuuttia" - -[page_not_found] -other = "Sivua ei löydetty" - -[page_does_not_exist] -other = "Valitettavasti tätä sivua ei ole olemassa." - -[head_back] -other = "Voit palata takaisin kotisivulle." 
- -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Tarjoaa" - -[see_also] -other = "Katso myös" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/fr.toml b/themes/hugo-coder/i18n/fr.toml deleted file mode 100644 index f51fd404..00000000 --- a/themes/hugo-coder/i18n/fr.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "catégorie" -other = "catégories" - -[tags] -one = "tag" -other = "tags" - -[series] -one = "série" -other = "séries" - -[authors] -one = "auteur" -other = "auteurs" - -[posts] -other = "articles" - -[reading_time] -one = "Une minute de lecture" -other = "{{ .Count }} minutes de lecture" - -[page_not_found] -other = "Page Non Trouvée" - -[page_does_not_exist] -other = "Désolé, cette page n'existe pas." - -[head_back] -other = "Vous pouvez revenir à l'accueil." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Propulsé par" - -[see_also] -other = "Voir aussi dans" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/he.toml b/themes/hugo-coder/i18n/he.toml deleted file mode 100644 index 141b7576..00000000 --- a/themes/hugo-coder/i18n/he.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "קטגוריה" -other = "categories" - -[tags] -one = "תגית" -other = "tags" - -[series] -one = "סדרה" -other = "series" - -[authors] -one = "סופר" -other = "authors" - -[posts] -other = "פוסטים" - -[reading_time] -one = "דקה אחת לקרוא" -other = "דקות לקרוא {{ .Count }}" - -[page_not_found] -other = "דף האינטרנט שביקשת לא נמצא" - -[page_does_not_exist] -other = "מצטערים, דף אינטרנט זה אינו קיים" - -[head_back] -other = " אתה יכול לחזורלדף הבית." 
- -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "מופעל על ידי" - -[see_also] -other = "רואה עוד ב" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/hi.toml b/themes/hugo-coder/i18n/hi.toml deleted file mode 100644 index 2614982d..00000000 --- a/themes/hugo-coder/i18n/hi.toml +++ /dev/null @@ -1,65 +0,0 @@ -[categories] -one = "श्रेणी" -other = "श्रेणियाँ" - -[tags] -one = "टैग" -other = "टैग्स" - -[series] -one = "श्रृंखला" -other = "श्रृंखलाएँ" - -[authors] -one = "लेखक" -other = "लेखक" - -[posts] -other = "सामग्री" - -[reading_time] -one = "पढ़ने में एक मिनट लगेगा" -other = "पढ़ने में {{ .Count }} मिनट लगेंगे" - -[page_not_found] -other = "पृष्ठ नहीं मिला" - -[page_does_not_exist] -other = "क्षमा करें, यह पृष्ठ मौजूद नहीं है।" - -[head_back] -other = "आप मुखपृष्ठ पर वापस आ सकते हैं।" - -[licensed_under] -other = "लाइसेंस:" - -[powered_by] -other = "संचालन:" - -[see_also] -other = "यह भी देखें" - -[note] -other = "नोट" - -[tip] -other = "टिप्पणी" - -[example] -other = "उदाहरण" - -[question] -other = "प्रश्न" - -[info] -other = "जानकारी" - -[warning] -other = "चेतावनी" - -[error] -other = "त्रुटि" - - -[link_to_heading] -other = "हैडिंग की कड़ी" diff --git a/themes/hugo-coder/i18n/hu.toml b/themes/hugo-coder/i18n/hu.toml deleted file mode 100644 index fb47d014..00000000 --- a/themes/hugo-coder/i18n/hu.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "Kategória" -other = "categories" - -[tags] -one = "Címke" -other = "tags" - -[series] -one = "Sorozat" -other = "series" - -[authors] -one = "Szerző" -other = "authors" - -[posts] -other = "Írások" - -[reading_time] -one = "Egyperces" -other = "{{ .Count }} percnyi olvasnivaló" - -[page_not_found] -other = "Az oldal nem található" - -[page_does_not_exist] -other = "Sajnálom, a kért oldal nem létezik." - -[head_back] -other = "Itt visszaléphet a Főoldalra." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Motor:" - -[see_also] -other = "Lásd még" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/id.toml b/themes/hugo-coder/i18n/id.toml deleted file mode 100644 index f556d0e8..00000000 --- a/themes/hugo-coder/i18n/id.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "kategori" -other = "categories" - -[tags] -one = "label" -other = "tags" - -[series] -one = "seri" -other = "series" - -[authors] -one = "penulis" -other = "authors" - -[posts] -other = "artikel" - -[reading_time] -one = "bacaan satu menit" -other = "bacaan {{ .Count }} menit" - -[page_not_found] -other = "Halaman tidak ditemukan" - -[page_does_not_exist] -other = "Maaf, halaman ini tidak ada." - -[head_back] -other = "Anda dapat kembali ke beranda." 
- -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Dipersembahkan oleh" - -[see_also] -other = "artikel terkait" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/it.toml b/themes/hugo-coder/i18n/it.toml deleted file mode 100644 index 9c8a6da6..00000000 --- a/themes/hugo-coder/i18n/it.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "categoria" -other = "categorie" - -[tags] -one = "tag" -other = "tags" - -[series] -one = "serie" -other = "serie" - -[authors] -one = "autore" -other = "autori" - -[posts] -other = "post" - -[reading_time] -one = "Lettura di un minuto" -other = "{{ .Count }} minuti di lettura" - -[page_not_found] -other = "Pagina non trovata" - -[page_does_not_exist] -other = "Siamo spiacenti, questa pagina non esiste." - -[head_back] -other = "Torna alla homepage." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Generato da" - -[see_also] -other = "Vedi anche in" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/ja.toml b/themes/hugo-coder/i18n/ja.toml deleted file mode 100644 index 4a7e3031..00000000 --- a/themes/hugo-coder/i18n/ja.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "カテゴリー" -other = "カテゴリー" - -[tags] -one = "タグ" -other = "タグ" - -[series] -one = "シリーズ" -other = "シリーズ" - -[authors] -one = "著者" -other = "著者" - -[posts] -other = "記事" - -[reading_time] -one = "1分で読めます" -other = "{{ .Count }}分で読めます" - -[page_not_found] -other = "ページが見つかりません" - -[page_does_not_exist] -other = "申し訳ございません。アクセスしようとしたページが見つかりませんでした。" - -[head_back] -other = "トップページからお探しいただきますようお願い申し上げます。" - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Powered by" - -[see_also] -other = "関連記事:" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "見出しへのリンク" diff --git a/themes/hugo-coder/i18n/ko.toml b/themes/hugo-coder/i18n/ko.toml deleted file mode 100644 index a34c8f60..00000000 --- a/themes/hugo-coder/i18n/ko.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "카테고리" -other = "categories" - -[tags] -one = "태그" -other = "tags" - -[series] -one = "시리즈" -other = "series" - -[authors] -one = "저자" -other = "authors" - -[posts] -other = "포스트" - -[reading_time] -one = "읽는데 1분" -other = "읽는데 {{ .Count }}분" - -[page_not_found] -other = "페이지를 찾을 수 없습니다." - -[page_does_not_exist] -other = "해당 페이지가 존재하지 않습니다." 
- -[head_back] -other = "홈페이지로 돌아가기" - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Powered by" - -[see_also] -other = "관련 글:" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/ms.toml b/themes/hugo-coder/i18n/ms.toml deleted file mode 100644 index 25b037e2..00000000 --- a/themes/hugo-coder/i18n/ms.toml +++ /dev/null @@ -1,67 +0,0 @@ -[categories] -one = "kategori" -other = "kategori" - -[tags] -one = "teg" -other = "teg" - -[series] -one = "siri" -other = "siri" - -[author] -other = "penulis" - -[authors] -one = "penulis" -other = "penulis" - -[posts] -other = "artikel" - -[reading_time] -one = "Bacaan 1 minit" -other = "Bacaan {{ .Count }} minit" - -[page_not_found] -other = "Halaman Tidak Dijumpai" - -[page_does_not_exist] -other = "Maaf, halaman ini tidak wujud." - -[head_back] -other = "Kembali ke halaman utama." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Dikuasakan oleh" - -[see_also] -other = "Lihat juga" - -[note] -other = "nota" - -[tip] -other = "tip" - -[example] -other = "contoh" - -[question] -other = "soalan" - -[info] -other = "maklumat" - -[warning] -other = "amaran" - -[error] -other = "ralat" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/nl.toml b/themes/hugo-coder/i18n/nl.toml deleted file mode 100644 index 801e5256..00000000 --- a/themes/hugo-coder/i18n/nl.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "categorie" -other = "categorieën" - -[tags] -one = "label" -other = "labels" - -[series] -one = "serie" -other = "series" - -[authors] -one = "auteur" -other = "auteurs" - -[posts] -other = "berichten" - -[reading_time] -one = "Eén minuut leestijd" -other = "{{ .Count }} minuten leestijd" - -[page_not_found] -other = "Pagina niet gevonden" - -[page_does_not_exist] -other = "Sorry, deze pagina bestaat niet." - -[head_back] -other = "U kunt terugkeren naar de thuispagina." - -[licensed_under] -other = "Valt onder" - -[powered_by] -other = "Aangedreven door" - -[see_also] -other = "Zie ook in" - -[note] -other = "notitie" - -[tip] -other = "tip" - -[example] -other = "voorbeeld" - -[question] -other = "vraag" - -[info] -other = "info" - -[warning] -other = "waarschuwing" - -[error] -other = "fout" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/np.toml b/themes/hugo-coder/i18n/np.toml deleted file mode 100644 index 4e1e1c04..00000000 --- a/themes/hugo-coder/i18n/np.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "वर्ग" -other = "categories" - -[tags] -one = "ट्याग" -other = "tags" - -[series] -one = "श्रृंखला" -other = "series" - -[authors] -one = "लेखक" -other = "authors" - -[posts] -other = "सामग्री" - -[reading_time] -one = "एक मिनेट पढाई" -other = "{{ .Count }} मिनेट पढाई" - -[page_not_found] -other = "पृष्ठ भेटिएन।" - -[page_does_not_exist] -other = "माफ गर्नुहोस, यो पृष्ठ उपलब्ध छैन।" - -[head_back] -other = "मुख्यपृष्ठमा फर्किन सक्नुहुन्छ।मुख्यपृष्ठ." 
- -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "द्वारा संचालित" - -[see_also] -other = "यो पनि हेर्नुहोस।" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/pl.toml b/themes/hugo-coder/i18n/pl.toml deleted file mode 100644 index 801bec49..00000000 --- a/themes/hugo-coder/i18n/pl.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "kategoria" -other = "kategorie" - -[tags] -one = "tag" -other = "tagi" - -[series] -one = "seria" -other = "seria" - -[authors] -one = "autor(ka)" -other = "autorzy" - -[posts] -other = "posty" - -[reading_time] -one = "1 min czytania" -other = "{{ .Count }} min czytania" - -[page_not_found] -other = "Nie znaleziono strony" - -[page_does_not_exist] -other = "Wybacz, ta strona nie istnieje." - -[head_back] -other = "Możesz wrócić na stronę domową." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Napędzane przez" - -[see_also] -other = "Zobacz również" - -[note] -other = "notka" - -[tip] -other = "wskazówka" - -[example] -other = "przykład" - -[question] -other = "pytanie" - -[info] -other = "informacja" - -[warning] -other = "ostrzeżenie" - -[error] -other = "błąd" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/pt-br.toml b/themes/hugo-coder/i18n/pt-br.toml deleted file mode 100644 index 422771c7..00000000 --- a/themes/hugo-coder/i18n/pt-br.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "categoria" -other = "categorias" - -[tags] -one = "etiqueta" -other = "etiquetas" - -[series] -one = "séries" -other = "séries" - -[authors] -one = "autor" -other = "autores" - -[posts] -other = "artigos" - -[reading_time] -one = "Um minuto de leitura" -other = "{{ .Count }} minutos de leitura" - -[page_not_found] -other = "Página Não Encontrada" - -[page_does_not_exist] -other = "Desculpe, essa página não existe." - -[head_back] -other = "Você pode voltar para a página inicial." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Promovido por" - -[see_also] -other = "Veja também em" - -[note] -other = "nota" - -[tip] -other = "dica" - -[example] -other = "exemplo" - -[question] -other = "pergunta" - -[info] -other = "info" - -[warning] -other = "aviso" - -[error] -other = "erro" - -[link_to_heading] -other = "Link para o cabeçalho" diff --git a/themes/hugo-coder/i18n/ro.toml b/themes/hugo-coder/i18n/ro.toml deleted file mode 100644 index d3bb8544..00000000 --- a/themes/hugo-coder/i18n/ro.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "categorie" -other = "categorii" - -[tags] -one = "etichetă" -other = "etichete" - -[series] -one = "serie" -other = "serie" - -[authors] -one = "autor" -other = "autori" - -[posts] -other = "articole" - -[reading_time] -one = "Un minut de lectură" -other = "{{ .Count }} {{ if lt .Count 20 }}minute{{ else }}de minute{{ end }} de lectură" - -[page_not_found] -other = "Pagină Pierdută" - -[page_does_not_exist] -other = "Ne pare rău, această pagină nu există." - -[head_back] -other = "Poți să te întorci la pagina principală." 
- -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Susținut de" - -[see_also] -other = "Altele din seria" - -[note] -other = "notă" - -[tip] -other = "sfat" - -[example] -other = "exemplu" - -[question] -other = "întrebare" - -[info] -other = "informație" - -[warning] -other = "avertisment" - -[error] -other = "eroare" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/ru.toml b/themes/hugo-coder/i18n/ru.toml deleted file mode 100644 index c360fb05..00000000 --- a/themes/hugo-coder/i18n/ru.toml +++ /dev/null @@ -1,65 +0,0 @@ -[categories] -one = "категория" -other = "категории" - -[tags] -one = "тэг" -other = "тэги" - -[series] -one = "серия" -other = "серии" - -[authors] -one = "автор" -other = "авторы" - -[posts] -other = "статьи" - -[reading_time] -one = "Читать одну минуту" -few = "Читать около {{ .Count }} минут" -many = "Читать около {{ .Count }} минут" - -[page_not_found] -other = "Страница не найдена" - -[page_does_not_exist] -other = "Извините, но этой страницы не существует." - -[head_back] -other = "Можете вернуться обратно на домашнюю страницу." - -[licensed_under] -other = "Лицензирован по" - -[powered_by] -other = "Работает на" - -[see_also] -other = "Также смотрите" - -[note] -other = "заметка" - -[tip] -other = "совет" - -[example] -other = "пример" - -[question] -other = "вопрос" - -[info] -other = "информация" - -[warning] -other = "предупреждение" - -[error] -other = "ошибка" - -[link_to_heading] -other = "Ссылка на заголовок" diff --git a/themes/hugo-coder/i18n/se.toml b/themes/hugo-coder/i18n/se.toml deleted file mode 100644 index 566992a0..00000000 --- a/themes/hugo-coder/i18n/se.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "Kategori" -other = "Katgorier" - -[tags] -one = "Tagg" -other = "Taggar" - -[series] -one = "Serie" -other = "Serie" - -[authors] -one = "Författare" -other = "Författare" - -[posts] -other = "Inlägg" - -[reading_time] -one = "Ein minut lästid" -other = "{{ .Count }} minuter lästid" - -[page_not_found] -other = "Sida hittades inte" - -[page_does_not_exist] -other = "Ursäkta, men denna sida existerar inte." - -[head_back] -other = "Här kan du komma tillbaka till startsidan." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Byggd med" - -[see_also] -other = "Se även i" - -[note] -other = "Notera" - -[tip] -other = "Tips" - -[example] -other = "Exempel" - -[question] -other = "Fråga" - -[info] -other = "Info" - -[warning] -other = "Varning" - -[error] -other = "Fel" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/sk.toml b/themes/hugo-coder/i18n/sk.toml deleted file mode 100644 index 3c8b2555..00000000 --- a/themes/hugo-coder/i18n/sk.toml +++ /dev/null @@ -1,66 +0,0 @@ -[categories] -one = "kategória" -other = "categories" - -[tags] -one = "téma" -other = "tags" - -[series] -one = "diel" -other = "series" - -[authors] -one = "autor" -other = "authors" - -[posts] -other = "články" - -[reading_time] -one = "Prečítate si za minútu" -few = "Prečítate si do {{ .Count }} minút" -many = "Prečítate si do {{ .Count }} minút" -other = "Prečítate si do {{ .Count }} minút" - -[page_not_found] -other = "Stránka nebola nájdená" - -[page_does_not_exist] -other = "Prepáčte, ale táto stránka neexistuje." - -[head_back] -other = "Späť na domácu stránku." 
- -[licensed_under] -other = "Licensed under" - -[see_also] -other = "Pozrite tiež" - -[powered_by] -other = "Táto stránka bola vytvorená cez" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/sq.toml b/themes/hugo-coder/i18n/sq.toml deleted file mode 100644 index f0ed7d25..00000000 --- a/themes/hugo-coder/i18n/sq.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "kategori" -other = "kategoritë" - -[tags] -one = "shenjim" -other = "shenjuesat" - -[series] -one = "seritë" -other = "seritë" - -[authors] -one = "autori" -other = "autorë" - -[posts] -other = "postime" - -[reading_time] -one = "një-minutë lexim" -other = "{{ .Count }}-minute read" - -[page_not_found] -other = "Nuk u gjet faqja" - -[page_does_not_exist] -other = "Na vjen keq, faqja nuk u gjet" - -[head_back] -other = "Mund të ktheheni prapë tek homepage." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "Mundësuar nga " - -[see_also] -other = "Shiko gjithashtu" - -[note] -other = "shënim" - -[tip] -other = "ndihmë" - -[example] -other = "shembull" - -[question] -other = "pyetje" - -[info] -other = "informacion" - -[warning] -other = "kujdes" - -[error] -other = "gabim" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/i18n/sr-latn.toml b/themes/hugo-coder/i18n/sr-latn.toml deleted file mode 100644 index 9956d9b0..00000000 --- a/themes/hugo-coder/i18n/sr-latn.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "kategorija" -other = "kategorije" - -[tags] -one = "tag" -other = "tagovi" - -[series] -one = "serija" -other = "serije" - -[authors] -one = "autor" -other = "autori" - -[posts] -other = "objave" - -[reading_time] -one = "Jedan minut čitanja" -other = "{{ .Count }} minuta čitanja" - -[page_not_found] -other = "Stranica nije pronađena" - -[page_does_not_exist] -other = "Izvinite, ova stranica ne postoji." - -[head_back] -other = "Možete se vratiti na početnu stranicu." - -[licensed_under] -other = "Licensirano pod" - -[powered_by] -other = "Pokreće" - -[see_also] -other = "takođe proveriti" - -[note] -other = "napomena" - -[tip] -other = "savet" - -[example] -other = "primer" - -[question] -other = "pitanje" - -[info] -other = "informacija" - -[warning] -other = "upozorenje" - -[error] -other = "greška" - -[link_to_heading] -other = "Link do zaglavlja" diff --git a/themes/hugo-coder/i18n/sr.toml b/themes/hugo-coder/i18n/sr.toml deleted file mode 100644 index 3c23395b..00000000 --- a/themes/hugo-coder/i18n/sr.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "категорија" -other = "категорије" - -[tags] -one = "таг" -other = "тагови" - -[series] -one = "серија" -other = "серије" - -[authors] -one = "аутор" -other = "аутори" - -[posts] -other = "објаве" - -[reading_time] -one = "Један минут читања" -other = "{{ .Count }} минута читања" - -[page_not_found] -other = "Страница није пронађена" - -[page_does_not_exist] -other = "Извините, ова страница не постоји." - -[head_back] -other = "Можете се вратити на почетну страницу." 
- -[licensed_under] -other = "Лиценсирано под" - -[powered_by] -other = "Покреће" - -[see_also] -other = "такође проверити" - -[note] -other = "напомена" - -[tip] -other = "савет" - -[example] -other = "пример" - -[question] -other = "питање" - -[info] -other = "информација" - -[warning] -other = "упозорење" - -[error] -other = "грешка" - -[link_to_heading] -other = "Линк до заглавља" diff --git a/themes/hugo-coder/i18n/tr.toml b/themes/hugo-coder/i18n/tr.toml deleted file mode 100644 index 84787ade..00000000 --- a/themes/hugo-coder/i18n/tr.toml +++ /dev/null @@ -1,65 +0,0 @@ -[categories] -one = "kategori" -other = "kategoriler" - -[tags] -one = "etiket" -other = "etiketler" - -[series] -one = "dizi" -other = "diziler" - -[authors] -one = "yazar" -other = "yazarlar" - -[posts] -one = "Gönderi" -other = "Gönderiler" - -[reading_time] -one = "Bir dakikalık okuma" -other = "{{ .Count }} dakikalık okuma" - -[page_not_found] -other = "Sayfa Bulunamadı" - -[page_does_not_exist] -other = "Özür dileriz, böyle bir sayfa yok." - -[head_back] -other = "Ana sayfaya dönebilirsiniz." - -[licensed_under] -other = "Lisansı alınta:" - -[powered_by] -other = "Site program altyapısı" - -[see_also] -other = "Ayrıca bakınız" - -[note] -other = "not" - -[tip] -other = "tavsiye" - -[example] -other = "örnek" - -[question] -other = "soru" - -[info] -other = "bilgi" - -[warning] -other = "uyarı" - -[error] -other = "hata" - -[link_to_heading] -other = "Başlık Linki" diff --git a/themes/hugo-coder/i18n/zh-cn.toml b/themes/hugo-coder/i18n/zh-cn.toml deleted file mode 100644 index e6392c89..00000000 --- a/themes/hugo-coder/i18n/zh-cn.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "分类" -other = "分类" - -[tags] -one = "标签" -other = "标签" - -[series] -one = "系列" -other = "系列" - -[authors] -one = "作者" -other = "作者" - -[posts] -other = "文章" - -[reading_time] -one = "阅读时间:1 分钟" -other = "阅读时间:{{ .Count }} 分钟" - -[page_not_found] -other = "找不到页面" - -[page_does_not_exist] -other = "此页面不存在" - -[head_back] -other = "返回 首页." - -[licensed_under] -other = "许可依据" - -[powered_by] -other = "技术支持" - -[see_also] -other = "参见" - -[note] -other = "注释" - -[tip] -other = "提示" - -[example] -other = "例子" - -[question] -other = "问题" - -[info] -other = "信息" - -[warning] -other = "警告" - -[error] -other = "错误" - -[link_to_heading] -other = "链接到标题" diff --git a/themes/hugo-coder/i18n/zh-tw.toml b/themes/hugo-coder/i18n/zh-tw.toml deleted file mode 100644 index 69fb4c6d..00000000 --- a/themes/hugo-coder/i18n/zh-tw.toml +++ /dev/null @@ -1,64 +0,0 @@ -[categories] -one = "分類" -other = "categories" - -[tags] -one = "標籤" -other = "tags" - -[series] -one = "系列" -other = "series" - -[authors] -one = "作者" -other = "authors" - -[posts] -other = "文章" - -[reading_time] -one = "閱讀時間 1 分鐘" -other = "閱讀時間 {{ .Count }} 分鐘" - -[page_not_found] -other = "找不到頁面" - -[page_does_not_exist] -other = "此頁面不存在" - -[head_back] -other = "返回 首頁." - -[licensed_under] -other = "Licensed under" - -[powered_by] -other = "技術支援" - -[see_also] -other = "參見" - -[note] -other = "note" - -[tip] -other = "tip" - -[example] -other = "example" - -[question] -other = "question" - -[info] -other = "info" - -[warning] -other = "warning" - -[error] -other = "error" - -[link_to_heading] -other = "Link to heading" diff --git a/themes/hugo-coder/layouts/404.html b/themes/hugo-coder/layouts/404.html deleted file mode 100644 index 23ecb19b..00000000 --- a/themes/hugo-coder/layouts/404.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ define "content" }} - {{ partial "404.html" . 
}} -{{ end }} diff --git a/themes/hugo-coder/layouts/_default/_markup/render-heading.html b/themes/hugo-coder/layouts/_default/_markup/render-heading.html deleted file mode 100644 index 7c5bfe22..00000000 --- a/themes/hugo-coder/layouts/_default/_markup/render-heading.html +++ /dev/null @@ -1,7 +0,0 @@ - - {{ .Text | safeHTML }} - - - {{ i18n "link_to_heading" | default "Link to heading" }} - - diff --git a/themes/hugo-coder/layouts/_default/_markup/render-link.html b/themes/hugo-coder/layouts/_default/_markup/render-link.html deleted file mode 100644 index 0b13cd21..00000000 --- a/themes/hugo-coder/layouts/_default/_markup/render-link.html +++ /dev/null @@ -1 +0,0 @@ -{{ .Text | safeHTML }} \ No newline at end of file diff --git a/themes/hugo-coder/layouts/_default/baseof.html b/themes/hugo-coder/layouts/_default/baseof.html deleted file mode 100644 index d998247b..00000000 --- a/themes/hugo-coder/layouts/_default/baseof.html +++ /dev/null @@ -1,129 +0,0 @@ - - - - - {{ block "title" . }}{{ .Site.Title }}{{ end }} - {{ partial "head.html" . }} - - {{ with .Params.featuredImage }} - - - - {{ end }} - - - - -{{ $csClass := "colorscheme-light" }} -{{ if eq .Site.Params.colorScheme "dark" }} -{{ $csClass = "colorscheme-dark" }} -{{ else if eq .Site.Params.colorScheme "auto" }} -{{ $csClass = "colorscheme-auto" }} -{{ end }} - - - {{ partial "float" . }} -
- {{ partial "header.html" . }} - -
- {{ block "content" . }}{{ end }} -
- - {{ partial "footer.html" . }} -
- - {{ if .HasShortcode "mermaid" }} - - - {{ end }} - - {{ if .Site.IsServer }} - {{ $script := resources.Get "js/coder.js" }} - - {{ else }} - {{ $script := resources.Get "js/coder.js" | minify | fingerprint }} - - {{ end }} - - {{ range .Site.Params.customJS }} - {{ if $.Site.IsServer }} - {{ $script := resources.Get . }} - - {{ else }} - {{ $script := resources.Get . | minify | fingerprint }} - - {{ end }} - {{ end }} - - {{ range .Site.Params.customRemoteJS }} - {{ if $.Site.IsServer }} - {{ $script := resources.GetRemote . }} - - {{ else }} - {{ $script := resources.GetRemote . | minify | fingerprint }} - - {{ end }} - {{ end }} - - - {{ template "_internal/google_analytics.html" . }} - - {{ if and .Site.Params.fathomAnalytics .Site.Params.fathomAnalytics.siteID }} - {{- partial "analytics/fathom" . -}} - {{ end }} - - {{ if and .Site.Params.plausibleAnalytics .Site.Params.plausibleAnalytics.domain }} - {{- partial "analytics/plausible" . -}} - {{ end }} - - {{ if and .Site.Params.goatCounter .Site.Params.goatCounter.code }} - {{- partial "analytics/goatcounter" . -}} - {{ end }} - - {{ if and .Site.Params.cloudflare .Site.Params.cloudflare.token }} - {{- partial "analytics/cloudflare" . -}} - {{ end }} - - {{ if and .Site.Params.baidu .Site.Params.baidu.token }} - {{- partial "analytics/baidu" . -}} - {{ end }} - - {{ if and .Site.Params.wideAngleAnalytics .Site.Params.wideAngleAnalytics.siteID }} - {{- partial "analytics/wideangle" . -}} - {{ end }} - - {{ if and .Site.Params.matomo .Site.Params.matomo.serverURL }} - {{- partial "analytics/matomo" . -}} - {{ end }} - - {{ if and .Site.Params.googleTagManager .Site.Params.googleTagManager.id }} - {{- partial "analytics/googletagmanager" . -}} - {{ end }} - - {{ if and .Site.Params.applicationInsights .Site.Params.applicationInsights.connectionString }} - {{- partial "analytics/applicationinsights" . -}} - {{ end }} - - {{ if and .Site.Params.microAnalytics .Site.Params.microAnalytics.id }} - {{- partial "analytics/microanalyticsio" . -}} - {{ end }} - - {{ if and .Site.Params.yandexMetrika .Site.Params.yandexMetrika.id }} - {{- partial "analytics/yandex-metrika" . -}} - {{ end }} - - {{ if and .Site.Params.umami .Site.Params.umami.siteID }} - {{- partial "analytics/umami" . -}} - {{ end }} - - {{ if and .Site.Params.pirsch .Site.Params.pirsch.code }} - {{- partial "analytics/pirsch" . -}} - {{ end }} - - {{- partial "body/extensions" . -}} - - - diff --git a/themes/hugo-coder/layouts/_default/li.html b/themes/hugo-coder/layouts/_default/li.html deleted file mode 100644 index 79b53cb5..00000000 --- a/themes/hugo-coder/layouts/_default/li.html +++ /dev/null @@ -1,4 +0,0 @@ -
- {{ .Date | time.Format (.Site.Params.dateFormat | default "January 2, 2006" ) }} - {{ .Title }} -
diff --git a/themes/hugo-coder/layouts/_default/list.html b/themes/hugo-coder/layouts/_default/list.html deleted file mode 100644 index 17abb713..00000000 --- a/themes/hugo-coder/layouts/_default/list.html +++ /dev/null @@ -1,22 +0,0 @@ -{{ define "title" }} - {{ title (i18n (lower .Title)) | default .Title }} · {{ .Site.Title }} -{{ end }} -{{ define "content" }} -
    -
    -

    - - {{ title (i18n (lower .Title)) | default .Title }} - -

    -
    - {{ .Content }} -
      - {{- range .Paginator.Pages -}} - {{- .Render "li" -}} - {{- end -}} -
    - - {{ partial "pagination.html" . }} -
    -{{ end }} diff --git a/themes/hugo-coder/layouts/_default/single.html b/themes/hugo-coder/layouts/_default/single.html deleted file mode 100644 index d8823f29..00000000 --- a/themes/hugo-coder/layouts/_default/single.html +++ /dev/null @@ -1,56 +0,0 @@ -{{ define "title" }} - {{ .Title }} · {{ .Site.Title }} -{{ end }} -{{ define "content" }} -
    -
    -
    -
    -

    - - {{ .Title }} - -

    -
    - -
    - -
    - {{ if .Params.featuredImage }} - Featured image - {{ end }} - {{ .Content }} -
    - - -
    - {{ partial "posts/series.html" . }} - {{ partial "posts/disqus.html" . }} - {{ partial "posts/commento.html" . }} - {{ partial "posts/utterances.html" . }} - {{ partial "posts/giscus.html" . }} - {{ partial "posts/telegram.html" . }} -
    -
    - - {{ partial "posts/math.html" . }} -
    -{{ end }} diff --git a/themes/hugo-coder/layouts/_default/terms.html b/themes/hugo-coder/layouts/_default/terms.html deleted file mode 100644 index 48b37e2c..00000000 --- a/themes/hugo-coder/layouts/_default/terms.html +++ /dev/null @@ -1,10 +0,0 @@ -{{ define "title" }} - {{- if eq .Kind "term" -}} - {{- i18n .Data.Plural | title -}} - {{- print ": " -}} - {{- end -}} - {{- i18n (lower .Title) | default .Title | title }} · {{ .Site.Title -}} -{{ end }} -{{ define "content" }} - {{ partial "terms.html" . }} -{{ end }} diff --git a/themes/hugo-coder/layouts/index.html b/themes/hugo-coder/layouts/index.html deleted file mode 100644 index b829b616..00000000 --- a/themes/hugo-coder/layouts/index.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ define "content" }} - {{ partial "home.html" . }} -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/404.html b/themes/hugo-coder/layouts/partials/404.html deleted file mode 100644 index a884c685..00000000 --- a/themes/hugo-coder/layouts/partials/404.html +++ /dev/null @@ -1,7 +0,0 @@ -
    -
    -

    404

    -

    {{ i18n "page_not_found" }}

    -

    {{ i18n "page_does_not_exist" }}
    {{ i18n "head_back" .Site.BaseURL | safeHTML }}

    -
    -
    diff --git a/themes/hugo-coder/layouts/partials/analytics/applicationinsights.html b/themes/hugo-coder/layouts/partials/analytics/applicationinsights.html deleted file mode 100644 index 72d2d18f..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/applicationinsights.html +++ /dev/null @@ -1,13 +0,0 @@ - diff --git a/themes/hugo-coder/layouts/partials/analytics/baidu.html b/themes/hugo-coder/layouts/partials/analytics/baidu.html deleted file mode 100644 index e2fcbe9e..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/baidu.html +++ /dev/null @@ -1,11 +0,0 @@ - - - diff --git a/themes/hugo-coder/layouts/partials/analytics/cloudflare.html b/themes/hugo-coder/layouts/partials/analytics/cloudflare.html deleted file mode 100644 index d8a732f8..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/cloudflare.html +++ /dev/null @@ -1,4 +0,0 @@ - - - diff --git a/themes/hugo-coder/layouts/partials/analytics/fathom.html b/themes/hugo-coder/layouts/partials/analytics/fathom.html deleted file mode 100644 index 13e7cfc7..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/fathom.html +++ /dev/null @@ -1,13 +0,0 @@ - diff --git a/themes/hugo-coder/layouts/partials/analytics/goatcounter.html b/themes/hugo-coder/layouts/partials/analytics/goatcounter.html deleted file mode 100644 index 5ba30634..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/goatcounter.html +++ /dev/null @@ -1,2 +0,0 @@ - diff --git a/themes/hugo-coder/layouts/partials/analytics/googletagmanager.html b/themes/hugo-coder/layouts/partials/analytics/googletagmanager.html deleted file mode 100644 index 2d2581d3..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/googletagmanager.html +++ /dev/null @@ -1,9 +0,0 @@ - - - diff --git a/themes/hugo-coder/layouts/partials/analytics/matomo.html b/themes/hugo-coder/layouts/partials/analytics/matomo.html deleted file mode 100644 index 8fabb635..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/matomo.html +++ /dev/null @@ -1,13 +0,0 @@ - diff --git a/themes/hugo-coder/layouts/partials/analytics/microanalyticsio.html b/themes/hugo-coder/layouts/partials/analytics/microanalyticsio.html deleted file mode 100644 index 0e8b65b4..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/microanalyticsio.html +++ /dev/null @@ -1,8 +0,0 @@ - \ No newline at end of file diff --git a/themes/hugo-coder/layouts/partials/analytics/pirsch.html b/themes/hugo-coder/layouts/partials/analytics/pirsch.html deleted file mode 100644 index c2fa8526..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/pirsch.html +++ /dev/null @@ -1,3 +0,0 @@ - \ No newline at end of file diff --git a/themes/hugo-coder/layouts/partials/analytics/plausible.html b/themes/hugo-coder/layouts/partials/analytics/plausible.html deleted file mode 100644 index 28450c00..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/plausible.html +++ /dev/null @@ -1 +0,0 @@ - diff --git a/themes/hugo-coder/layouts/partials/analytics/umami.html b/themes/hugo-coder/layouts/partials/analytics/umami.html deleted file mode 100644 index 42a917fb..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/umami.html +++ /dev/null @@ -1,4 +0,0 @@ - - - \ No newline at end of file diff --git a/themes/hugo-coder/layouts/partials/analytics/wideangle.html b/themes/hugo-coder/layouts/partials/analytics/wideangle.html deleted file mode 100644 index 3ec5b709..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/wideangle.html +++ /dev/null @@ -1,5 +0,0 @@ - diff --git 
a/themes/hugo-coder/layouts/partials/analytics/yandex-metrika.html b/themes/hugo-coder/layouts/partials/analytics/yandex-metrika.html deleted file mode 100644 index 9261187a..00000000 --- a/themes/hugo-coder/layouts/partials/analytics/yandex-metrika.html +++ /dev/null @@ -1,14 +0,0 @@ - - - - diff --git a/themes/hugo-coder/layouts/partials/body/extensions.html b/themes/hugo-coder/layouts/partials/body/extensions.html deleted file mode 100644 index 6dddea7e..00000000 --- a/themes/hugo-coder/layouts/partials/body/extensions.html +++ /dev/null @@ -1,5 +0,0 @@ -{{/* - This extension point occurs just before the end of each page's tag. - - You can add further theme extensions or customizations here if needed. -*/}} diff --git a/themes/hugo-coder/layouts/partials/csp.html b/themes/hugo-coder/layouts/partials/csp.html deleted file mode 100644 index e9238a43..00000000 --- a/themes/hugo-coder/layouts/partials/csp.html +++ /dev/null @@ -1 +0,0 @@ -{{ printf `` (delimit .Site.Params.csp.childsrc " ") (delimit .Site.Params.csp.fontsrc " ") (delimit .Site.Params.csp.formaction " ") (delimit .Site.Params.csp.framesrc " ") (delimit .Site.Params.csp.imgsrc " ") (delimit .Site.Params.csp.objectsrc " ") (delimit .Site.Params.csp.stylesrc " ") (delimit .Site.Params.csp.scriptsrc " ") (delimit .Site.Params.csp.prefetchsrc " ") (delimit .Site.Params.csp.connectsrc " ") | safeHTML }} diff --git a/themes/hugo-coder/layouts/partials/float.html b/themes/hugo-coder/layouts/partials/float.html deleted file mode 100644 index 623047d6..00000000 --- a/themes/hugo-coder/layouts/partials/float.html +++ /dev/null @@ -1,7 +0,0 @@ -{{ if not .Site.Params.hideColorSchemeToggle }} - -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/footer.html b/themes/hugo-coder/layouts/partials/footer.html deleted file mode 100644 index 201f5a2a..00000000 --- a/themes/hugo-coder/layouts/partials/footer.html +++ /dev/null @@ -1,19 +0,0 @@ -
    -
    - © - {{ if (and .Site.Params.since (lt .Site.Params.since now.Year)) }} - {{ .Site.Params.since }} - - {{ end }} - {{ now.Year }} - {{ with .Site.Params.author }} {{ . }} {{ end }} - · - {{ if (and .Site.Params.license) }} - {{ i18n "licensed_under" }} {{ .Site.Params.license | safeHTML }} - · - {{ end }} - {{ i18n "powered_by" }} Hugo & Coder. - {{ if (and .Site.Params.commit .GitInfo) }} - [{{ .GitInfo.AbbreviatedHash }}] - {{ end }} -
    -
    diff --git a/themes/hugo-coder/layouts/partials/head.html b/themes/hugo-coder/layouts/partials/head.html deleted file mode 100644 index e9cad8b1..00000000 --- a/themes/hugo-coder/layouts/partials/head.html +++ /dev/null @@ -1,21 +0,0 @@ -{{ partial "head/meta-tags.html" . }} - -{{ if .Params.canonicalUrl }} - -{{ else }} - -{{ end }} - -{{ partialCached "head/theme-styles.html" . }} - -{{ partialCached "head/color-scheme.html" . }} - -{{ partialCached "head/custom-styles.html" . }} - -{{ partialCached "head/custom-icons.html" . }} - -{{ partial "head/alternative-output-formats.html" . }} - -{{ if .IsHome }}{{ partial "head/hugo-generator.html" . }}{{ end }} - -{{ partial "head/extensions.html" . }} diff --git a/themes/hugo-coder/layouts/partials/head/alternative-output-formats.html b/themes/hugo-coder/layouts/partials/head/alternative-output-formats.html deleted file mode 100644 index 8b1829a7..00000000 --- a/themes/hugo-coder/layouts/partials/head/alternative-output-formats.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ range .AlternativeOutputFormats -}} - {{ printf `` .Rel .MediaType.Type .RelPermalink $.Site.Title | safeHTML }} -{{ end -}} diff --git a/themes/hugo-coder/layouts/partials/head/color-scheme.html b/themes/hugo-coder/layouts/partials/head/color-scheme.html deleted file mode 100644 index 7a55e8ea..00000000 --- a/themes/hugo-coder/layouts/partials/head/color-scheme.html +++ /dev/null @@ -1,11 +0,0 @@ -{{ if or (eq .Site.Params.colorScheme "auto") (eq .Site.Params.colorScheme "dark") }} - {{ if .Site.IsServer }} - {{ $cssOpts := (dict "targetPath" "css/coder-dark.css" "enableSourceMap" true ) }} - {{ $styles := resources.Get "scss/coder-dark.scss" | resources.ExecuteAsTemplate "style.coder-dark.css" . | toCSS $cssOpts }} - - {{ else }} - {{ $cssOpts := (dict "targetPath" "css/coder-dark.css" ) }} - {{ $styles := resources.Get "scss/coder-dark.scss" | resources.ExecuteAsTemplate "style.coder-dark.css" . | toCSS $cssOpts | minify | fingerprint }} - - {{ end }} -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/head/custom-icons.html b/themes/hugo-coder/layouts/partials/head/custom-icons.html deleted file mode 100644 index 6bad1a13..00000000 --- a/themes/hugo-coder/layouts/partials/head/custom-icons.html +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/themes/hugo-coder/layouts/partials/head/custom-styles.html b/themes/hugo-coder/layouts/partials/head/custom-styles.html deleted file mode 100644 index 64969633..00000000 --- a/themes/hugo-coder/layouts/partials/head/custom-styles.html +++ /dev/null @@ -1,21 +0,0 @@ - {{ range .Site.Params.customCSS }} - {{ if $.Site.IsServer }} - {{ $styles := resources.Get . }} - - {{ else }} - {{ $styles := resources.Get . | minify | fingerprint }} - - {{ end }} -{{ end }} - -{{ range .Site.Params.customSCSS }} - {{/* We don't change the targetPath to because it's transparent to users */}} - {{ if $.Site.IsServer }} - {{ $cssOpts := (dict "enableSourceMap" true ) }} - {{ $styles := resources.Get . | toCSS $cssOpts }} - - {{ else }} - {{ $styles := resources.Get . | toCSS | minify | fingerprint }} - - {{ end }} -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/head/extensions.html b/themes/hugo-coder/layouts/partials/head/extensions.html deleted file mode 100644 index a9eb885a..00000000 --- a/themes/hugo-coder/layouts/partials/head/extensions.html +++ /dev/null @@ -1,4 +0,0 @@ -{{/* - You can add further theme extensions or customizations here if they should - appear in . 
-*/}} diff --git a/themes/hugo-coder/layouts/partials/head/hugo-generator.html b/themes/hugo-coder/layouts/partials/head/hugo-generator.html deleted file mode 100644 index cee2034a..00000000 --- a/themes/hugo-coder/layouts/partials/head/hugo-generator.html +++ /dev/null @@ -1 +0,0 @@ -{{ hugo.Generator }} diff --git a/themes/hugo-coder/layouts/partials/head/meta-tags.html b/themes/hugo-coder/layouts/partials/head/meta-tags.html deleted file mode 100644 index 5a2c7081..00000000 --- a/themes/hugo-coder/layouts/partials/head/meta-tags.html +++ /dev/null @@ -1,18 +0,0 @@ - - - - -{{ if .Site.Params.csp }} -{{ partial "csp.html" . }} -{{ end }} - -{{ with .Site.Params.author }} -{{ end }} - - - -{{ template "_internal/twitter_cards.html" . }} -{{ template "_internal/opengraph.html" . }} - - - \ No newline at end of file diff --git a/themes/hugo-coder/layouts/partials/head/theme-styles.html b/themes/hugo-coder/layouts/partials/head/theme-styles.html deleted file mode 100644 index 720b7b29..00000000 --- a/themes/hugo-coder/layouts/partials/head/theme-styles.html +++ /dev/null @@ -1,23 +0,0 @@ - - -{{ if .Site.IsServer }} - {{ $cssOpts := (dict "targetPath" "css/coder.css" "enableSourceMap" true ) }} - {{ $styles := resources.Get "scss/coder.scss" | resources.ExecuteAsTemplate "style.coder.css" . | toCSS $cssOpts }} - -{{ else }} - {{ $cssOpts := (dict "targetPath" "css/coder.css" ) }} - {{ $styles := resources.Get "scss/coder.scss" | resources.ExecuteAsTemplate "style.coder.css" . | toCSS $cssOpts | minify | fingerprint }} - -{{ end }} - -{{ if .Site.Params.rtl }} - {{ if .Site.IsServer }} - {{ $cssOpts := (dict "targetPath" "css/coder-rtl.css" "enableSourceMap" true ) }} - {{ $styles := resources.Get "scss/coder-rtl.scss" | resources.ExecuteAsTemplate "style.coder-rtl.css" . | toCSS $cssOpts }} - - {{ else }} - {{ $cssOpts := (dict "targetPath" "css/coder-rtl.css" ) }} - {{ $styles := resources.Get "scss/coder-rtl.scss" | resources.ExecuteAsTemplate "style.coder-rtl.css" . | toCSS $cssOpts | minify | fingerprint }} - - {{ end }} -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/header.html b/themes/hugo-coder/layouts/partials/header.html deleted file mode 100644 index c8857212..00000000 --- a/themes/hugo-coder/layouts/partials/header.html +++ /dev/null @@ -1,39 +0,0 @@ - diff --git a/themes/hugo-coder/layouts/partials/home.html b/themes/hugo-coder/layouts/partials/home.html deleted file mode 100644 index 8f178c84..00000000 --- a/themes/hugo-coder/layouts/partials/home.html +++ /dev/null @@ -1,11 +0,0 @@ -
    -
    - {{ partialCached "home/avatar.html" . }} - - {{ partialCached "home/author.html" . }} - - {{ partialCached "home/social.html" . }} -
    - - {{ partialCached "home/extensions.html" . }} -
    diff --git a/themes/hugo-coder/layouts/partials/home/author.html b/themes/hugo-coder/layouts/partials/home/author.html deleted file mode 100644 index 97437d0f..00000000 --- a/themes/hugo-coder/layouts/partials/home/author.html +++ /dev/null @@ -1,6 +0,0 @@ -

    {{ .Site.Params.author }}

    -{{ if reflect.IsSlice .Site.Params.info }} -

    {{ range .Site.Params.info }}{{ . | markdownify }}
    {{ end}}

    -{{ else }} -

    {{ .Site.Params.info | markdownify }}

    -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/home/avatar.html b/themes/hugo-coder/layouts/partials/home/avatar.html deleted file mode 100644 index 49289152..00000000 --- a/themes/hugo-coder/layouts/partials/home/avatar.html +++ /dev/null @@ -1,8 +0,0 @@ -{{ if and (isset .Site.Params "avatarurl") (not (isset .Site.Params "gravatar")) }} - {{ with .Site.Params.avatarURL }} -
    avatar
    - {{ end }} -{{ end }} -{{ with .Site.Params.gravatar }} -
    gravatar
    -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/home/extensions.html b/themes/hugo-coder/layouts/partials/home/extensions.html deleted file mode 100644 index ba42ccb5..00000000 --- a/themes/hugo-coder/layouts/partials/home/extensions.html +++ /dev/null @@ -1,4 +0,0 @@ -{{/* -You can add further theme extensions or customizations here if they should -appear in after the "about" section. -*/}} diff --git a/themes/hugo-coder/layouts/partials/home/social.html b/themes/hugo-coder/layouts/partials/home/social.html deleted file mode 100644 index 8d726c83..00000000 --- a/themes/hugo-coder/layouts/partials/home/social.html +++ /dev/null @@ -1,17 +0,0 @@ -{{ with .Site.Params.social }} - -{{ end }} \ No newline at end of file diff --git a/themes/hugo-coder/layouts/partials/list.html b/themes/hugo-coder/layouts/partials/list.html deleted file mode 100644 index 80026ae2..00000000 --- a/themes/hugo-coder/layouts/partials/list.html +++ /dev/null @@ -1,23 +0,0 @@ -
    -
    -

    - - {{- if eq .Kind "term" -}} - {{- i18n .Data.Plural 1 | title -}} - {{- print ": " -}} - {{- end -}} - {{- i18n (lower .Title) | default .Title | title -}} - -

    -
    - {{ .Content }} -
      - {{ range .Paginator.Pages }} -
- {{ .Date | time.Format (.Site.Params.dateFormat | default "January 2, 2006" ) }} - {{ .Title }} -
- {{ end }} -
    - {{ partial "pagination.html" . }} -
    diff --git a/themes/hugo-coder/layouts/partials/page.html b/themes/hugo-coder/layouts/partials/page.html deleted file mode 100644 index 656f2f11..00000000 --- a/themes/hugo-coder/layouts/partials/page.html +++ /dev/null @@ -1,13 +0,0 @@ -
    - -
    diff --git a/themes/hugo-coder/layouts/partials/pagination.html b/themes/hugo-coder/layouts/partials/pagination.html deleted file mode 100644 index 1e004b86..00000000 --- a/themes/hugo-coder/layouts/partials/pagination.html +++ /dev/null @@ -1,48 +0,0 @@ -{{ $paginator := .Paginator }} -{{ $adjacent_links := 2 }} -{{ $max_links := (add (mul $adjacent_links 2) 1) }} -{{ $lower_limit := (add $adjacent_links 1) }} -{{ $upper_limit := (sub $paginator.TotalPages $adjacent_links) }} -{{ if gt $paginator.TotalPages 1 }} - -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/posts/commento.html b/themes/hugo-coder/layouts/partials/posts/commento.html deleted file mode 100644 index 93a245e2..00000000 --- a/themes/hugo-coder/layouts/partials/posts/commento.html +++ /dev/null @@ -1,4 +0,0 @@ -{{- if and (isset .Site.Params "commentourl") (not (eq .Site.Params.commentoURL "" )) (eq (.Params.disableComments | default false) false) -}} -
    - -{{- end -}} diff --git a/themes/hugo-coder/layouts/partials/posts/disqus.html b/themes/hugo-coder/layouts/partials/posts/disqus.html deleted file mode 100644 index d0b52422..00000000 --- a/themes/hugo-coder/layouts/partials/posts/disqus.html +++ /dev/null @@ -1,26 +0,0 @@ -{{- if and (not (eq (.Site.DisqusShortname | default "") "")) (eq (.Params.disableComments | default false) false) -}} -
    - -{{- end -}} diff --git a/themes/hugo-coder/layouts/partials/posts/giscus.html b/themes/hugo-coder/layouts/partials/posts/giscus.html deleted file mode 100644 index 3bf25d21..00000000 --- a/themes/hugo-coder/layouts/partials/posts/giscus.html +++ /dev/null @@ -1,33 +0,0 @@ -{{- if isset .Site.Params "giscus" -}} - {{- if and (isset .Site.Params.giscus "repo") (not (eq .Site.Params.giscus.repo "" )) (eq (.Params.disableComments | default false) false) -}} -
    - -
    - {{- end -}} -{{- end -}} diff --git a/themes/hugo-coder/layouts/partials/posts/math.html b/themes/hugo-coder/layouts/partials/posts/math.html deleted file mode 100644 index 0d2d100d..00000000 --- a/themes/hugo-coder/layouts/partials/posts/math.html +++ /dev/null @@ -1,19 +0,0 @@ -{{- if or (.Params.math) (.Site.Params.math) (.Params.katex) (.Site.Params.katex) -}} - - {{/* The loading of KaTeX is deferred to speed up page rendering */}} - - -{{- end -}} diff --git a/themes/hugo-coder/layouts/partials/posts/series.html b/themes/hugo-coder/layouts/partials/posts/series.html deleted file mode 100644 index ed5f2149..00000000 --- a/themes/hugo-coder/layouts/partials/posts/series.html +++ /dev/null @@ -1,30 +0,0 @@ -{{ $currentPageUrl := .RelPermalink }} -{{ if .Params.series }} -
    - {{ range .Params.series }} - {{ $name := . | anchorize }} - {{ $series := index $.Site.Taxonomies.series $name }} - {{ if gt (len $series.Pages) 1 }} -

    - {{ i18n "see_also" | default "See also in" }} {{ . }} - - - {{ i18n "link_to_heading" | default "Link to heading" }} - -

    - - {{ end }} - {{ end }} -
    -{{ end }} diff --git a/themes/hugo-coder/layouts/partials/posts/telegram.html b/themes/hugo-coder/layouts/partials/posts/telegram.html deleted file mode 100644 index 2cdae652..00000000 --- a/themes/hugo-coder/layouts/partials/posts/telegram.html +++ /dev/null @@ -1,22 +0,0 @@ -{{- if isset .Site.Params "telegram" -}} - {{- if and (isset .Site.Params.telegram "siteid") (not (eq .Site.Params.telegram.siteID "" )) (eq (.Params.disableComments | default false) false) -}} -
    - -
    - {{- end -}} -{{- end -}} diff --git a/themes/hugo-coder/layouts/partials/posts/utterances.html b/themes/hugo-coder/layouts/partials/posts/utterances.html deleted file mode 100644 index 5388cf66..00000000 --- a/themes/hugo-coder/layouts/partials/posts/utterances.html +++ /dev/null @@ -1,33 +0,0 @@ -{{- if isset .Site.Params "utterances" -}} -{{- if and (isset .Site.Params.utterances "repo") (not (eq .Site.Params.utterances.repo "" )) (eq -(.Params.disableComments | default false) false) -}} -
    - -
    -{{- end -}} -{{- end -}} \ No newline at end of file diff --git a/themes/hugo-coder/layouts/partials/taxonomy/authors.html b/themes/hugo-coder/layouts/partials/taxonomy/authors.html deleted file mode 100644 index bfd6b0cb..00000000 --- a/themes/hugo-coder/layouts/partials/taxonomy/authors.html +++ /dev/null @@ -1,9 +0,0 @@ -
    - - {{- range $index, $el := . -}} - {{- if gt $index 0 }} - - {{- end }} - {{ .LinkTitle }} - {{- end -}} -
    diff --git a/themes/hugo-coder/layouts/partials/taxonomy/categories.html b/themes/hugo-coder/layouts/partials/taxonomy/categories.html deleted file mode 100644 index aec7953b..00000000 --- a/themes/hugo-coder/layouts/partials/taxonomy/categories.html +++ /dev/null @@ -1,9 +0,0 @@ -
    - - {{- range $index, $el := . -}} - {{- if gt $index 0 }} - - {{- end }} - {{ .LinkTitle }} - {{- end -}} -
    diff --git a/themes/hugo-coder/layouts/partials/taxonomy/tags.html b/themes/hugo-coder/layouts/partials/taxonomy/tags.html deleted file mode 100644 index 47fc5f2d..00000000 --- a/themes/hugo-coder/layouts/partials/taxonomy/tags.html +++ /dev/null @@ -1,11 +0,0 @@ -
    - - {{- range $index, $el := . -}} - {{- if gt $index 0 }} - - {{- end }} - - {{ .LinkTitle }} - - {{- end -}} -
    diff --git a/themes/hugo-coder/layouts/partials/terms.html b/themes/hugo-coder/layouts/partials/terms.html deleted file mode 100644 index 864dc9f4..00000000 --- a/themes/hugo-coder/layouts/partials/terms.html +++ /dev/null @@ -1,29 +0,0 @@ -
    -
    -

    - - {{- if eq .Kind "term" -}} - {{- i18n .Data.Plural | title -}} - {{- print ": " -}} - {{- end -}} - {{- i18n (lower .Title) | default .Title | title -}} - -

    -
    - {{ .Content }} -
      - {{ $type := .Type }} - {{ range $key, $value := .Data.Terms.Alphabetical }} - {{ $name := .Name }} - {{ $count := .Count }} - {{ with $.Site.GetPage (printf "/%s/%s" $type $name) }} -
- - {{ .Name }} - {{ $count }} - -
- {{ end }} - {{ end }} -
    -
    diff --git a/themes/hugo-coder/layouts/posts/li.html b/themes/hugo-coder/layouts/posts/li.html deleted file mode 100644 index 79b53cb5..00000000 --- a/themes/hugo-coder/layouts/posts/li.html +++ /dev/null @@ -1,4 +0,0 @@ -
- {{ .Date | time.Format (.Site.Params.dateFormat | default "January 2, 2006" ) }} - {{ .Title }} -
diff --git a/themes/hugo-coder/layouts/posts/list.html b/themes/hugo-coder/layouts/posts/list.html deleted file mode 100644 index 17abb713..00000000 --- a/themes/hugo-coder/layouts/posts/list.html +++ /dev/null @@ -1,22 +0,0 @@ -{{ define "title" }} - {{ title (i18n (lower .Title)) | default .Title }} · {{ .Site.Title }} -{{ end }} -{{ define "content" }} -
    -
    -

    - - {{ title (i18n (lower .Title)) | default .Title }} - -

    -
    - {{ .Content }} -
      - {{- range .Paginator.Pages -}} - {{- .Render "li" -}} - {{- end -}} -
    - - {{ partial "pagination.html" . }} -
    -{{ end }} diff --git a/themes/hugo-coder/layouts/posts/single.html b/themes/hugo-coder/layouts/posts/single.html deleted file mode 100644 index 8993eafb..00000000 --- a/themes/hugo-coder/layouts/posts/single.html +++ /dev/null @@ -1,54 +0,0 @@ -{{ define "title" }} - {{ .Title }} · {{ .Site.Title }} -{{ end }} -{{ define "content" }} -
    -
    -
    -
    -

    - - {{ .Title }} - -

    -
    - -
    - -
    - {{ if .Params.featuredImage }} - Featured image - {{ end }} - {{ .Content }} -
    - - -
    - {{ partial "posts/series.html" . }} - {{ partial "posts/disqus.html" . }} - {{ partial "posts/commento.html" . }} - {{ partial "posts/utterances.html" . }} - {{ partial "posts/giscus.html" . }} - {{ partial "posts/telegram.html" . }} -
    -
    - - {{ partial "posts/math.html" . }} -
    -{{ end }} diff --git a/themes/hugo-coder/layouts/projects/li.html b/themes/hugo-coder/layouts/projects/li.html deleted file mode 100644 index 87012538..00000000 --- a/themes/hugo-coder/layouts/projects/li.html +++ /dev/null @@ -1,9 +0,0 @@ -
- - -
    - -

    {{ .Title }}

    -
    -
    -
diff --git a/themes/hugo-coder/layouts/projects/list.html b/themes/hugo-coder/layouts/projects/list.html deleted file mode 100644 index 17abb713..00000000 --- a/themes/hugo-coder/layouts/projects/list.html +++ /dev/null @@ -1,22 +0,0 @@ -{{ define "title" }} - {{ title (i18n (lower .Title)) | default .Title }} · {{ .Site.Title }} -{{ end }} -{{ define "content" }} -
    -
    -

    - - {{ title (i18n (lower .Title)) | default .Title }} - -

    -
    - {{ .Content }} -
      - {{- range .Paginator.Pages -}} - {{- .Render "li" -}} - {{- end -}} -
    - - {{ partial "pagination.html" . }} -
    -{{ end }} diff --git a/themes/hugo-coder/layouts/projects/single.html b/themes/hugo-coder/layouts/projects/single.html deleted file mode 100644 index 33720314..00000000 --- a/themes/hugo-coder/layouts/projects/single.html +++ /dev/null @@ -1,42 +0,0 @@ -{{ define "title" }} - {{ .Title }} · {{ .Site.Title }} -{{ end }} -{{ define "content" }} -
    -
    -
    -
    -

    - - {{ .Title }} - -

    -
    - -
    - -
    - {{ if .Params.featuredImage }} - Featured image - {{ end }} - {{ .Content }} -
    - - -
    - {{ partial "posts/series.html" . }} - {{ partial "posts/disqus.html" . }} - {{ partial "posts/commento.html" . }} - {{ partial "posts/utterances.html" . }} - {{ partial "posts/giscus.html" . }} - {{ partial "posts/telegram.html" . }} -
    -
    - - {{ partial "posts/math.html" . }} -
    -{{ end }} diff --git a/themes/hugo-coder/layouts/shortcodes/mermaid.html b/themes/hugo-coder/layouts/shortcodes/mermaid.html deleted file mode 100644 index 85166b1d..00000000 --- a/themes/hugo-coder/layouts/shortcodes/mermaid.html +++ /dev/null @@ -1,3 +0,0 @@ -
    - {{- .Inner | safeHTML }} -
    diff --git a/themes/hugo-coder/layouts/shortcodes/notice.html b/themes/hugo-coder/layouts/shortcodes/notice.html deleted file mode 100644 index 772a32c7..00000000 --- a/themes/hugo-coder/layouts/shortcodes/notice.html +++ /dev/null @@ -1,9 +0,0 @@ -{{- $type := .Get 0 -}} -{{- $title := .Get 1 | default $type -}} -{{- $inner := .Inner | .Page.RenderString | chomp -}} -{{- $icon := dict "note" "fa-sticky-note" "tip" "fa-lightbulb-o" "example" "fa-file-text" "question" "fa-question" "info" "fa-exclamation-circle" "warning" "fa-exclamation-triangle" "error" "fa-times-circle" -}} -
    -
    - {{- $inner -}} -
    -
    diff --git a/themes/hugo-coder/layouts/shortcodes/tab.html b/themes/hugo-coder/layouts/shortcodes/tab.html deleted file mode 100644 index 0fdd14ed..00000000 --- a/themes/hugo-coder/layouts/shortcodes/tab.html +++ /dev/null @@ -1,17 +0,0 @@ -{{ $group := .Page.Scratch.Get "tabGroupIndex" | default 0 }} -{{ $index := .Page.Scratch.Get "tabElementIndex" | default 0 }} -{{ $name := .Get "name" | default "Name Me!" }} - - - - - -
    - {{ .Inner | markdownify }} -
    - -{{ .Page.Scratch.Set "tabElementIndex" (add 1 $index) }} diff --git a/themes/hugo-coder/layouts/shortcodes/tabgroup.html b/themes/hugo-coder/layouts/shortcodes/tabgroup.html deleted file mode 100644 index 7a190054..00000000 --- a/themes/hugo-coder/layouts/shortcodes/tabgroup.html +++ /dev/null @@ -1,10 +0,0 @@ -{{ $align := .Get "align" | default "left" }} -{{ $style := .Get "style" | default "markdown" }} -{{ $group := .Page.Scratch.Get "tabGroupIndex" | default 0 }} -{{ $index := .Page.Scratch.Set "tabElementIndex" 0 }} - -
    - {{ .Inner }} -
    - -{{ .Page.Scratch.Set "tabGroupIndex" (add 1 $group) }} diff --git a/themes/hugo-coder/theme.toml b/themes/hugo-coder/theme.toml deleted file mode 100644 index 480884d0..00000000 --- a/themes/hugo-coder/theme.toml +++ /dev/null @@ -1,27 +0,0 @@ -name = "Coder" -license = "MIT" -licenselink = "https://github.com/luizdepra/hugo-coder/blob/master/LICENSE.md" -description = "A simple and clean blog theme for Hugo" -homepage = "https://github.com/luizdepra/hugo-coder/" -tags = [ - "blog", - "minimal", - "minimalist", - "responsive", - "simple", - "clean", - "personal" -] -features = [ - "analytics", - "favicon", - "multilingual", - "pagination", - "single-column", - "syntax-highlighting" -] -min_version = "0.79.0" - -[author] -name = "Luiz F. A. de Prá" -homepage = "https://luizdepra.com" diff --git a/zansara/index.html b/zansara/index.html new file mode 100644 index 00000000..79e9573f --- /dev/null +++ b/zansara/index.html @@ -0,0 +1,10 @@ + + + + https://www.zansara.dev/about/ + + + + + +