From 8b1a09531450ec1f3c7044de3e37ddfca72b6e55 Mon Sep 17 00:00:00 2001 From: Aaqa Ishtyaq Date: Wed, 6 Mar 2024 00:02:32 +0530 Subject: [PATCH] feat: add existing blogs from current website Signed-off-by: Aaqa Ishtyaq --- ...-01-08-algorithms-in-python-bubble-sort.md | 147 +++++ ...8-01-18-algorithms-in-python-quick-sort.md | 100 +++ ...2018-02-03-dont-be-scared-switch-to-vim.md | 93 +++ .../notes/2018-02-05-mind-blowing-git-tips.md | 116 ++++ .../2018-02-15-mind-blowing-python-tips.md | 190 ++++++ ...8-03-28-basic-file-operations-in-python.md | 156 +++++ ...018-05-07-deploy-django-application-git.md | 236 +++++++ ...e-ultimate-setup-for-remote-development.md | 116 ++++ ...5-09-the-ultimate-postgresql-cheatsheet.md | 576 ++++++++++++++++++ ...06-02-build-deploy-react-app-with-nginx.md | 261 ++++++++ .../notes/2018-09-01-scrapy-splash-setup.md | 115 ++++ content/notes/2022-07-29-git-exclude-files.md | 33 + .../2023-01-26-linux-container-networking.md | 5 +- 13 files changed, 2141 insertions(+), 3 deletions(-) create mode 100644 content/notes/2018-01-08-algorithms-in-python-bubble-sort.md create mode 100644 content/notes/2018-01-18-algorithms-in-python-quick-sort.md create mode 100644 content/notes/2018-02-03-dont-be-scared-switch-to-vim.md create mode 100644 content/notes/2018-02-05-mind-blowing-git-tips.md create mode 100644 content/notes/2018-02-15-mind-blowing-python-tips.md create mode 100644 content/notes/2018-03-28-basic-file-operations-in-python.md create mode 100644 content/notes/2018-05-07-deploy-django-application-git.md create mode 100644 content/notes/2018-05-08-the-ultimate-setup-for-remote-development.md create mode 100644 content/notes/2018-05-09-the-ultimate-postgresql-cheatsheet.md create mode 100644 content/notes/2018-06-02-build-deploy-react-app-with-nginx.md create mode 100644 content/notes/2018-09-01-scrapy-splash-setup.md create mode 100644 content/notes/2022-07-29-git-exclude-files.md diff --git 
a/content/notes/2018-01-08-algorithms-in-python-bubble-sort.md b/content/notes/2018-01-08-algorithms-in-python-bubble-sort.md new file mode 100644 index 0000000..2c9eb8a --- /dev/null +++ b/content/notes/2018-01-08-algorithms-in-python-bubble-sort.md @@ -0,0 +1,147 @@ ++++ +title = "Algorithms in Python: Bubble Sort" +date = "2018-01-08" ++++ + +## Some theory + +Bubble sort is another commonly known sorting algorithm. The idea here is to +scan a list of items (say integers) sequentially (from left to right) and +compare consecutive pairs of elements starting at index 0. + +Example: +```python + +my_numbers = [92,11,45,2234,0,7,65] +# 92 is index 0 and the consecutive pairs are +# (92,11), (11,45), (45,2234) and so on ... +``` +At first we compare elements (list[0],list[1]) then (list[1],list[2]) then +(list[2],list[3]) and so on until the end of the list is reached. + +When comparing we check if element i is greater than element i + 1, if they are +we just swap the two elements and move on to the next pair. If they are not this +means that the pair is already sorted, so we also move on to the next pair. + +Example: +```python +my_numbers = [92,11,45,2234,0,7,65] + +# Let's compare my_numbers[0] and my_numbers[1] +if my_numbers[0] > my_numbers[1]: + swap(my_numbers[0], my_numbers[1]) + +print(my_numbers) # [11, 92, 45, 2234, 0, 7, 65] +``` + +This process has to be repeated for however many items are on the list. So if +the list holds 9 items, it means we need to loop through it 9 times at most. +But what if our original list is partially sorted? We might not need 9 passes +through the list. + +One way for us to know that the list is fully sorted is if we have made no +swaps during our pass. For that, we need a variable to keep track of how many +swaps were made during a pass. + +Example: +```python +my_numbers = [92,11,45,2234,0,7,65] + +# Elements (0,1) are compared and swapped. List is now 11,92,45,2234,0,7,65 +# Elements (1,2) are compared and swapped. 
List is now 11,45,92,2234,0,7,65
+# Elements (2,3) are compared and not swapped. List remains the same.
+# Elements (3,4) are compared and swapped. List is now 11,45,92,0,2234,7,65
+# Elements (4,5) are compared and swapped. List is now 11,45,92,0,7,2234,65
+# Elements (5,6) are compared and swapped. List is now 11,45,92,0,7,65,2234
+
+# This represents one unique pass through the list.
+```
+
+Notice how after each pass the highest value number is pushed at len(list) - 1.
+
+## Some code
+
+Let's look at how to implement Bubble Sort using Python:
+
+```python
+def bubble_sort(some_list):
+
+    is_sorted = False
+
+    while not is_sorted:
+
+        is_sorted = True
+
+        for i in range(0, len(some_list) - 1):
+
+            if some_list[i] > some_list[i + 1]:
+
+                some_list[i], some_list[i+1] = some_list[i+1], some_list[i]
+                is_sorted = False
+```
+This works right and it will sort any list you throw at it. However we can
+slightly optimise it: We know that, after each pass the highest value element is
+guaranteed to be sorted and placed at len(some\_list) - 1. Because of this, for
+each subsequent pass, we can stop comparing at the last sorted item, instead of
+comparing pairs that we know are already sorted.
+This is what it looks like:
+
+```python
+def bubble_sort(some_list):
+
+    is_sorted = False
+    last_sorted_item = len(some_list) - 1
+
+    while not is_sorted:
+
+        is_sorted = True
+
+        for i in range(0, last_sorted_item):
+
+            if some_list[i] > some_list[i + 1]:
+
+                some_list[i], some_list[i+1] = some_list[i+1], some_list[i]
+                is_sorted = False
+
+        last_sorted_item -= 1
+```
+
+After each pass through the loop, we know the right side of the list is sorted
+so we decrement the value of last\_sorted\_item. What this means is that the 1st
+pass will loop from 0 to len(some\_list) -1, the second time, it will be from 0
+to len(some\_list) - 2 and so on ...
+
+## Time complexity
+
+The rate of growth of this algorithm is quadratic. Expressed as O(n^2) in
+"big-oh" notation.
+
+```python
+def bubble_sort(some_list):
+
+    is_sorted = False # time here is constant
+    last_sorted_item = len(some_list) - 1
+
+    while not is_sorted: # We go through this first loop n times
+
+        is_sorted = True
+
+        for i in range(0, last_sorted_item): # we go through this loop n-1 times
+
+            if some_list[i] > some_list[i + 1]:
+
+                # execution here is constant
+                some_list[i], some_list[i+1] = some_list[i+1], some_list[i]
+                is_sorted = False
+
+        last_sorted_item -= 1 # constant time
+```
+
+It's O(n^2) because, for each pass through the loop n times, we loop n times
+through the consecutive pairs. It's not a very efficient algorithm when used on large samples of data. It should only be used if you have a
+specific case on a small data set.
+
+Next in the series is QuickSort, another interesting and more efficient sorting
+algorithm. As always, if you have questions, comments or if you spotted a typo
+or a mistake, please feel free to let me know on Twitter, I'm
+[@aaqaishtyaq](https://twitter.com/aaqaishtyaq) and always happy to help!
diff --git a/content/notes/2018-01-18-algorithms-in-python-quick-sort.md b/content/notes/2018-01-18-algorithms-in-python-quick-sort.md
new file mode 100644
index 0000000..83e5fac
--- /dev/null
+++ b/content/notes/2018-01-18-algorithms-in-python-quick-sort.md
@@ -0,0 +1,100 @@
++++
+title = "Algorithms in Python: Quick Sort"
+date = "2018-01-18"
++++
+
+## Theory
+
+Quicksort is a "divide and conquer" type of algorithm. The good thing about
+it is that the worst case can almost always be avoided by using what is called a
+randomized version of quicksort (more on that later).
+
+The idea of Quicksort is to take an unsorted list and select an element (on that
+list) called a "pivot". Then the list is rearranged such that all elements greater
+(in value) than the pivot are placed to its right, and all elements lesser (in
+value) are placed to its left.
+
+This process is called partitioning.
At this stage in the execution of the +algorithm, the order of the elements doesn't matter so long as the +lesser/bigger values are placed on the correct side of the pivot. + +Partitioning will produce two sublists with the pivot as a separator ( +this is because the pivot will be at its natural place after the first pass aka +sorted). The problem then becomes sorting these two sublists. + +*Note: Partitioning does not require creating copies of the +list, we work on it directly as long as we keep track of the start and end +indices of each sublist.* + +To sort the two sublists, we can apply the same logic as above (choosing a +pivot, and sorting the two resulting sublists) because QuickSort is a recursive +algorithm. + +When a sublist only contains a single element, it's already sorted so we can +stop the recursion at this point, it's our exit condition. + +**Note on choosing a pivot** + +Some people use the last item of the list, and some people use the median of the +first, last, and medium elements but the most common way is to choose a random +pivot to ensure `n log n` execution. + +## Some Code + +```python +def swap_values(lst, val1, val2): + lst[val1], lst[val2] = lst[val2], lst[val1] + +def quicksort(array, start, end): + + if start < end: + + partition_index = partition(array, start, end) # + quicksort(array, start, partition_index - 1) + quicksort(array, partition_index + 1, end) + +def partition(array, start, end): + + pivot = end + partition_index = start + + for i in range(start, end): + + if array[i] < array[pivot]: + print("{} is less than {}".format(array[i], array[pivot])) + swap_values(array, partition_index, i) + partition_index += 1 + + array[pivot], array[partition_index] = array[partition_index], array[pivot] + return partition_index +``` + +A randomized version of Quicksort would look similar to what's above except that +we must randomize the selection of our pivot. + +```python +import random +# ... 
+def partition(array, start, end): + if start < end: + pivot = random.randint(start, end) + array[end], array[pivot] = array[pivot], array[end] + partition_index(array, start, end) + # ... +``` +Here, we set the pivot to a random integer in the range between `start` +and `end`. Then, we swap the value at that index with the value at array[end]. +If you run the code successively, you'll notice that the pivot is +different every time. It's a nice optimization that can save some time. + +## Time Complexity + +It's one of the most efficient sorting algorithm. In fact, most sorting +functions that come packaged in many language's standard libraries use an +implementation of QuickSort. + +The order of growth for QuickSort in the worst case is quadratic O(n^2). The +average case, however, which is the most common scenario, has a complexity of +O(n log n). + +QuickSort works best when used on large sets of data because of its recursive nature. diff --git a/content/notes/2018-02-03-dont-be-scared-switch-to-vim.md b/content/notes/2018-02-03-dont-be-scared-switch-to-vim.md new file mode 100644 index 0000000..f73f6ab --- /dev/null +++ b/content/notes/2018-02-03-dont-be-scared-switch-to-vim.md @@ -0,0 +1,93 @@ ++++ +title = "Don't be scared. Switch to vim." +date = "2018-02-03" ++++ + +I'm currently sitting at the most boring meetup I've probably ever attended in +Delhi. It's about chatbots. I don't care about chatbots, I care about free +stickers and pizza. So I'll take this opportunity to open up about a subject +that's dear to my heart: vim. + +I used to believe vim was exclusive to this superior race of developers who +gulp coffee like it's water and seem to only read HN and nothing else. (Hi, if +you're coming from HN). Architecture and Software design comes naturally to them, +they never run into bugs and they can recognize the most obscure of algorithms +at a glance (Shout out to Shashank, one of my mentors). + +Shanky is a good, productive developer. Shany uses vim. 
I want to be like Shanky. I want +to use vim. + +There are a million reasons why you should jump ship and join the cult. In the +next paragraphs, I will detail some of these reasons. + +## It's not (that) hard + +There's a learning curve to vim. But it's worth the +trouble. And if you're on Linux or MacOS, there's a built-in tool called +`vimtutor` (just fire it up from a terminal, I am not sure about Windows though) and +a wide variety of online tools to learn vim. Namely [openvim][0], +[vim adventures][1], and [vim genius][2]. + +Personally, The way I learned was by using it on small, fun side projects of +mine during the weekends, mostly to become familiar with the new mental model. +And just like everything in life, shit takes time, and practice makes perfect. +So keep at it and you'll eventually come to your "aha" moment. +As you get more and more comfortable using vim, it will become harder and harder +to go back to a regular editor / IDE. + +## It's Fast and Customisable + +Because it runs on the terminal, you'll never have to wait 20 seconds to get +on with your work. (Atom anyone ?) + +And if you like pretty things, there's a [large selection of colorschemes][11] +for you to choose from. On top of that, there's a plugin for just about anything +you might need. And if there isn't, you can program your own. + +## Ubiquity + +Not really, but I wanted to place a complicated word to sound smart. +Seriously though, it's everywhere. On Mac OS, Windows and of course Linux/Unix. If +you work on remote servers you can quickly edit files on the fly without having +to use nano. (Don't use nano) + +Say for example a coworker/friend is running into a bug, you come to help and +they're using an IDE you're not familiar with, well you can just access the files +from their terminal and start debugging right away. 
+ +Or if you're like me, and you spill water on your Macbook keyboard and it becomes +toast, you can spin up a VPS on Digital Ocean or AWS, and pick up where you +left off (almost) right away. + +## Bonus: Some of my favorite plugins + +My color scheme of choice (at the time of writing) is [afterglow][10]. + +And here's a list of my favorite plugins: + + +- [ Nerdtree ][3] (A tree explorer much like the sidebar in traditional IDEs) +- [ Airline ][4] (A sleek, customizable status bar) +- [ Surround ][5] (Helpful tool that helps with "surrounding" words with brackets etc) +- [ CtrlP ][6] (A fuzzy finder for vim) +- [ UtilSnips ][7] (Snippet utility for many languages) +- [ Vim Markdown][8] (Markdown syntax highlighting) +- [ Goyo ][9] (Allows for distraction-free editing) + +I'll end this article with a quote from a Chamillionaire: +> They see you vimmin', they hatin'. Patroling they tryna catch me coding dirty + +[0]: http://www.openvim.com/ +[1]: https://vim-adventures.com/ +[2]: http://www.vimgenius.com/ +[3]: https://github.com/scrooloose/nerdtree +[4]: https://github.com/vim-airline/vim-airline +[5]: https://github.com/tpope/vim-surround +[6]: https://github.com/kien/ctrlp.vim +[7]: https://github.com/SirVer/ultisnips +[8]: https://github.com/plasticboy/vim-markdown +[9]: https://github.com/junegunn/goyo.vim +[10]: https://github.com/danilo-augusto/vim-afterglow +[11]: http://vimcolors.com/ + + diff --git a/content/notes/2018-02-05-mind-blowing-git-tips.md b/content/notes/2018-02-05-mind-blowing-git-tips.md new file mode 100644 index 0000000..92350f2 --- /dev/null +++ b/content/notes/2018-02-05-mind-blowing-git-tips.md @@ -0,0 +1,116 @@ ++++ +title = "Mind-blowing git tips for beginners" +date = "2018-02-05" ++++ + +As developers, we all (hopefully) use git. It's not extremely hard nor time-consuming to get started with it and you will surely thank your future self for taking the time to learn it. Ok so that was for the cringy intro. 
+ +Now let's get down to business: what I really want to share in this post is a list of tricks I've learned during the past 2 1/2 years of using git. Some of it might seem trivial to seasoned developers but if you're just getting started, stick with me because this might just blow your mind. + +## The difference between git fetch and pull + +`git fetch` only updates the tracking remote branches. If you actually want to +update the local repo, you need to merge the local branch with the remote +tracking branch using `git merge`. + +**remote branches are prefixed with the name of the remote and a slash: +origin/branchname** + +git pull on the other hand, will execute both commands for you so when you issue +a `git pull` in a branch it will fetch that branch from the remote repo **and** +merge it with yours. + +To list all remote trackin branches, `git branch -r` is your friend. + +## Merge conflicts + +At some point you'll inevitably have to deal with merge conflicts. Essentially what this means is that git noticed a file was modified on both branches and it does not know which version is correct. It leaves it up to you to decide which one you want to push. + +if you open the file you will see a `HEAD` part prefixed with `<<<<` signs and a + second part containing the code on the branch you're trying to merge. Both + sections are separated with an equal sign. + +The top HEAD section contains the version of the file as it is on your current +branch. The other shows you what the code looks like on the branch you're trying to merge from. + +To resolve the conflict just delete the part you don't want (Including all the equal signs etc), save the file and commit it again. + +Note that as a safety measure it's always good practice to do a `git pull` to +see if you don't have any remaining conflicts. If not you can just push your code and you're all set! 
+ +As for the tips: + +`git merge --abort` will clean up the current working directory and go back to +the last version before the merge (it pretty much nullfies the merge). + +`git merge --squash` is an interesting one as it will bring the changes you made in the feature branch and create a new commit on your current branch without +mixing the two histories. + +## Dealing with files + +Now this one's tricky ! + +To remove a file from **both** the working directory and version control, you +can use `git rm ` but if you already deleted a file in the cli like I (and almost everyone) usually do then just run `git rm ` and it will stage it as deleted so you can commit. + +But what if you ramrafed (`rm -rf`) a bunch of files in your working directory? +You don't really want to manually stage every file for deletion, do you? (DO YOU ?) So what do you do in that case? Well, you just run `git add -u` (u for updating the working tree) and all of your deleted files will be staged so you can commit and push. + +"What if a man accidentally tracked a file and man doesn't want it deleted +from the index ?" I hear you ask with an exquisite South London accent. + +Well, my G, just run: + +```bash +git rm --cached +``` + +and you're done. Efficience ting. + +Something else that happens quite often is moving/renaming files (which is +essentially the same thing on Linux systems, as the path/address of the file +changed). + +Here's an example: imagine you have a file called index.html that you +want to rename to home.html. you can use `git mv` which takes a source and a +destination: + +```bash +git mv index.html home.html +``` + +if you want to move **and** rename just run + +```bash +git mv index.html path/to/home.html +``` + +However if you moved / renamed the file manually on the command line, running +`git status` will tell you that index.html was deleted and home.html is untracked. 
To fix this you will have to run two commands
+
+```bash
+git rm index.html # aka the file you moved/renamed
+```
+
+```bash
+git add home.html # aka the file with the new name x path
+```
+
+Running git status again will mark the file as renamed.
+
+Note that this also works if you don't rename the file. For example, if you just
+want to move index.html to src/index.html. The same command will apply
+(`git mv index.html src/index.html`).
+
+The other way of achieving this is:
+
+```bash
+git add -A
+```
+which will pick up the changes and automatically stage them for commit.
+
+So there you go, I hope you learned something useful in this article. If you know
+other mind-blowing tips and tricks for git, [@ me on twitter](https://twitter.com/aaqaishtyaq).
+
diff --git a/content/notes/2018-02-15-mind-blowing-python-tips.md b/content/notes/2018-02-15-mind-blowing-python-tips.md
new file mode 100644
index 0000000..f79677b
--- /dev/null
+++ b/content/notes/2018-02-15-mind-blowing-python-tips.md
@@ -0,0 +1,190 @@
++++
+title = "Mind-blowing Python tips"
+date = "2018-02-15"
++++
+
+## 0 - Loop over a range of numbers
+Use `range` instead of `xrange`.
+In python3, the former creates an iterator that produces the values one at
+a time making it much more efficient and fast.
+
+```python
+
+nums = [0,2, 34, 55, 32]
+for i in range(len(nums)):
+    print i
+
+```
+
+## 1 - Looping backwards
+Just use reversed.
+
+```python
+
+names = ["Case", "Molly", "Armitage", "Maelcum"]
+for name in reversed(names):
+    print name
+
+```
+
+## 2 - Looping over a list and its indices
+
+To keep track of the index of each item in a collection, enumerate is your buddy.
+
+```python
+names = ["Case", "Molly", "Armitage", "Maelcum"]
+for index, name in enumerate(names):
+    print index, name
+
+```
+
+## 3 - Looping over two lists simultaneously
+Yeah you could use zip, but izip is faster, so use that instead.
+
+```python
+
+from itertools import izip
+
+names = ["Case", "Molly", "Armitage", "Maelcum"]
+ages = [23, 27, 41, 24]
+for name, age in izip(names, ages):
+    print name, age
+
+```
+
+## 4 - Looping over a sorted list
+
+You can sort out the list first and then loop through it, or you could use
+sorted.
+
+```python
+
+names = ["Case", "Molly", "Armitage", "Maelcum"]
+for name in sorted(names):
+    print name
+
+```
+And BAM, you're ... sorted.
+
+## 5 - Call a function until a sentinel value is returned
+
+To do that, use iter().
+
+Bad example:
+
+Loop over a file containing a list of names
+until the loop returns an empty string,
+in which case we break out of it.
+
+```python
+
+names = []
+while True:
+    name = file.read(32)
+    if name == "":
+        break
+    names.append(name)
+```
+
+Beautiful example:
+
+In this case, we call a function (f.read) until it returns the sentinel value
+passed as a second argument to iter.
+That way we avoid having to make the unnecessary if check.
+
+```python
+for name in iter(partial(f.read, 32), ""):
+    print name
+```
+
+## 6 - Looping over a dictionary
+
+The normal way to do it:
+
+```python
+molly = { "name": "Molly Millions", "Age": 27, "Occupation": "Professional Killer"}
+
+for key in molly:
+    print key
+```
+If you wish to mutate the data, prefer `dict.keys()`.
+
+```python
+
+molly = { "name": "Molly Millions", "Age": 27, "Occupation": "Professional Killer"}
+
+for key in molly.keys():
+    # do the mutation
+
+```
+
+## 7 - Looping over a dict keys AND values
+
+Don't do this:
+
+
+```python
+
+molly = { "name": "Molly Millions", "Age": 27, "Occupation": "Professional Killer"}
+
+for key in molly:
+    print molly[key]
+
+```
+
+It's slow because we have to rehash the dictionary and do a lookup everytime.
+ +Instead choose `iteritems()`: + +```python + +molly = { "name": "Molly Millions", "Age": 27, "Occupation": "Professional Killer"} + +for key, value in molly.iteritems(): + print key, value + +``` + +## 8 - Create a dict out of two lists + +Just instantiate a new dict with two zipped lists. Real magic. + +```python + +from itertools import izip + +names = ["Case", "Molly", "Armitage", "Maelcum"] +ages = [23, 27, 41, 24] + +characters = dict(izip(names, ages)) + +``` + +## 9 - Use named tuples for returning multiple values + +Like in the case of an API response in Flask. + +```python + +from collections import namedtuple + +Response = namedtuple('APIResponse', ['status_code', 'body', 'headers']) + +@app.route('/users/1'): + + try: + user = db.getuserbyid(1) + except: + return Response(404, user.notfound(), {'content-type': 'application/json'} + else: + return Response(200, user.json(), {'content-type': 'application/json'} + +``` + +## Other + +* Always clarify function calls by using keyword arguments + +If you learned something from this article, share it with your co-workers and +fellow hackers. If you notice any typo, error etc let me know on +[twitter](https://twitter.com/aaqaishtyaq). diff --git a/content/notes/2018-03-28-basic-file-operations-in-python.md b/content/notes/2018-03-28-basic-file-operations-in-python.md new file mode 100644 index 0000000..2f3eade --- /dev/null +++ b/content/notes/2018-03-28-basic-file-operations-in-python.md @@ -0,0 +1,156 @@ ++++ +title = "Basic file operations in Python" +date = "2018-03-28" ++++ + +In this short (spoiler: it's actually quite lengthy) post, I will be going +through a list of very useful and handy methods in the `os` module +(which is part of the Python standard library) for handling files and +directories. + +## 1. Create a directory + +This one is pretty straightforward. If you're comfortable with the linux shell, +you know that `mkdir` is the command to use to create directories. 
+Unsurprisingly, Python uses the same naming convention. + +Example: + +```python +import os + +os.mkdir("my_awesome_directory") +``` + +The method takes a string its argument and will create the directory under the +file's parent folder. (For instance, if the path to the file calling os.mkdir() +is /home/username/Documents/app.py, the "my\_awesome\_directory" will be created +under /home/username/Documents) + +## 2. Get a file's parent directory. + +This is useful if you want to get the path to a file/folder that you know is +under the working file's parent directory. If this was confusing, here's an +example. + +In the Linux shell (bash or otherwise), you can issue the command `pwd` +(which I believe stands for "print working directory") to quickly print your +current location within a given session. + +In Python you would achieve this like so: + +```python +import os + +CURRENT_DIR = os.getcwd() +``` + +Notice I used all caps for the variable name. This is because it's usually a +constant. This variable isn't meant to be changed. +You can now use this variable to locate any file or folder within that +directory. Keep reading and I'll show you how. + +## 3. Concatenate paths + +This is something you'll find yourself doing a lot. Especially on large projects +that require configuration files and other such things. If you have experience +working with Django for example, they have a `settings.py` file littered with +calls to the os module. There are many benefits to this approach. Perhaps the +most obvious being is that if you ever decide to move your project to another +location, you don't want to keep modifying the path every time. Remember, +programming is all about being lazy. 
+ +So this is how you would do it: + +```python +import os + +# Assuming this file is located at /home/username/myproject/app.py +# and that you want to operate on a a file called config.cfg within the same +# directory : + +CURRENT_DIR = os.getcwd() # evaluates to /home/username/myproject +MY_TEXT_FILE = os.path.join(CURRENT_DIR, "config.cfg") + +print(MY_TEXT_FILE) # /home/username/myproject/config.cfg +``` +_An important note: os.path.join() merely concatenates the two paths together. +It doesn't check whether the path is valid. So be careful when using this +method. Also notice how the method call is to os.path.join() and not os.join()_ + + +## 4. Check that a path exists + +The other day I was working on a small web scraper for a side project of mine. +After the it was done fetching data, my script would save the results into a +pickle file (don't worry if you don't know what it is) that would be read by my +program, saving me the trouble of sitting there waiting to fetch the same info +over and over again each time I run the script. + +The solution was to tell my script to check whether a specific file (let's call +it results.pkl) exists at a given path. If it does, the program continues and +if not, the program executes the crawler function. + +This is clever because now I only have to fetch the data and if the file gets +deleted I know I can rely on the program to go and crawl the sites as expected. 
+ +And now for the example: + +```python +import os + +CURRENT_DIR = os.getcwd() +RESULTS_FILE = os.path.join(CURRENT_DIR, "results.pkl") + +def crawl_data(): + # scrapes a bunch of websites and saves the result in a file called + # results.pkl under the current directory + pass + +if not os.path.exists(RESULTS_FILE): + crawl_data() + +else: + ## the file exists so we can open it and work with its content +``` + +The same thing can be done to check that the path exists AND that it's a +directory: + +```python +CURRENT_DIR = os.getcwd() +MY_DIRECTORY = os.path.join(CURRENT_DIR, "my_directory") + +if os.path.exists(MY_DIRECTORY) and os.path.isdir(MY_DIRECTORY): + ## do something with the files inside the folder +else: + os.mkdir(MY_DIRECTORY) +``` + +## 5. List files within a given directory + +Very useful when you want to read several files that are under the same +directory. +It can be done in two ways: conventional and pythonic. I'll show you both. + +```python +for file in os.listdir("/path/to/dir"): + # do something with the filename (open it, copy it, move it, rename it...) + +``` +or + +```python +filenames = [file for file in os.listdir("/path/to/dir")] +``` + +Guess which way is more pythonic! + +## Conclusion + +These have been the most useful file/directory functions for me in Python. I +really love the fact that the method names sound natural and are (for the most +part) similar to linux commands. What are your favourite file operation methods? Any tip or trick you want to share with me? Something I've missed? Ping on +twitter! I'm [@aaqaishtyaq](https://twitter.com/aaqaishtyaq). + +If you found this article useful, please share it with your nerd friends/coworkers and spread the word! 
diff --git a/content/notes/2018-05-07-deploy-django-application-git.md b/content/notes/2018-05-07-deploy-django-application-git.md new file mode 100644 index 0000000..43fda41 --- /dev/null +++ b/content/notes/2018-05-07-deploy-django-application-git.md @@ -0,0 +1,236 @@ ++++ +title = "Deploy your Django application with git" +date = "2018-05-07" ++++ + +I'm going to make a bold statement: Django replaced Ruby on Rails in the hearts +of many developers. With this increase in popularity, we've seen tons or +articles, videos and websites dedicated to setting un Django and creating apps +using the framework. + +Unfortunately, when it comes to deployment, many of these resources only mention +heroku or pythonanywhere. While these are excellent solutions for quickly +shipping your MVP or prototype, it lacks a bit in terms of flexibility if you +want to create your custom deployment pipeline. + +**tl-dr: If you manage your own server infrastructure, we're going to setup a git +deployment workflow with django** + +## What you'll need + +* Working knowledge of ssh +* Working knowledge of git +* Working knowledge of the bash shell +* Basic linux command line skills (sorry hipsters) +* (Very) Basic knowledge of vi/vim +* Patience + +The typical workflow usually looks like this: + +* You have your development environment, either on your local machine or a remote server +* A git server (on GitHub, BitBucket, GitLab ...) that you and your team push your work to +* A production server (aka your live app). + +Usually when you commit and push work you do something like: + +```bash +git push origin +``` +`origin` being the name of the remote server your code is being pushed to. +What took me a while to realise is that you can have many remotes for your repo +that point to different servers. + +The idea here is to add a new remote to our repo. It will point to our +production server such that when we run `git push live master`, our code will be +copied over to there. 
+ +## On the production server + +To achieve this, we have some setup work to do on our live server. So go ahead +and connect to it via ssh. + +```bash +ssh user@host_or_ip_address + +# If your server's ssh service listens on a port other than 22, you'll need +# to add the -p switch + +ssh -p PORT user@host_or_ip_address +``` +Once we're in, we need to create a new directory for our application. This is +where our deployed code will be copied to. + +```bash +mkdir -p /home/user/sites/myawesomedjangoproject + +# Some people prefer to use /var/www/, it's really up to you. Just +# make sure remember the path to your project +``` + +Now head over to `/var` and create another directory called `repos` + +```bash +cd /var +sudo mkdir repos +# Depending on your setup, you might need sudo priveleges +``` + +Inside that directory, we need to create a folder named after our project +(or domain name) and append it with `.git` (not necessary but it's good practice) + +```bash +sudo mkdir myawsomedjangoproject.com.git +``` + +Inside this folder we'll create what is called a `bare` repository. To do this +just run: + +```bash +git init --bare +``` + +If you run `ls` inside that folder you'll see a bunch of files and directories +(the same ones found inside the `.git` folder in normal repos). One of these +directories is called `hooks`. + +Inside that folder, we'll need create a file called post-receive. + +```bash +# Assuming you are inside /var/repos/yourproject.git +sudo touch hooks/post-receive +``` +Now open it up with vi/vim + +```bash +sudo vim hooks/post-receive +``` +Hit `i` to switch to insert mode, and add the following to the file: + +```bash +#!/bin/bash +DEPLOYDIR=/home/username/sites/myawesomedjangoproject # or whatever path you chose +GIT_WORK_TREE="$DEPLOYDIR" git checkout -f +``` +**Please note that the first shebang line is important, it instructs git to use +bash instead of the default shell. 
Otherwise it won't activate our (soon to be +created) virtual environment** + +exit vim by hitting `:wq` (which in vim lingo means write and quit) + +What we've done here is set two variables. `DEPLOYDIR` is an alias for our +project path on the server, and `GIT_WORK_TREE` which is a special variable that +tells git to copy the code it receives inside of our `DEPLOYDIR`. This ensures +that we're always running the latest version of our code. + +As you've probably noticed, this post-receive file looks very much like a shell +script. That's because it is (as explained above). It's executed every time you +push code to the repo. + +The last thing we need to is make the script executable, so as soon as you're +back in the shell run: + +```bash +sudo chmod +x hooks/post-receive +``` +You can now exit the server and go back to your local machine. + +## On our local dev environment + +Now that we've created our remote repository, we need to add it to our +project (I like to call mine `live`). + +It takes one simple command: + +```bash +git remote add live root@ip_address:/var/repos/myawesomedjangoproject.git + +# And if your server's ssh service listens on a different port : + +git remote add live ssh://root@ip_address:PORT/var/repos/myawesomedjangoproject.git + +``` +To make sure it was added, you can print the list of available remotes by running: + +```bash +git remote -v # v for verbose + +``` +and that's it ! You can now make changes locally, commit and deploy +them live (or staging if it's a staging server) and see your changes instantly. + +You can obviously still push to github/lab or bitbucket with +`git push origin ` +like you normally would. + +## Bonus + +As I mentioned in the first part, the post-receive hook is a shell script. Which +means you can use it to perform all kinds of tasks against your code, like +running front-end builds, installing dependencies, etc ... 
+ +Here's an example for a basic Django App: + +```bash +#!/bin/bash + +DEPLOYDIR=/home/username/site/myawesomedjangoproject + +echo "[log] - Starting code update " +GIT_WORK_TREE="$DEPLOYDIR" git checkout -f +echo "[log] - Finished code update " + +if [[ -d "$DEPLOYDIR/ENV_projectname"]]; then + echo "[log] - Cleaning virtualenv" + cd "$DEPLOYDIR"; rm -rf ENV_projectname cd - + echo "[log] - Finished creating virtualenv" +fi + +echo "[log] - Creating virtualenv" +cd "$DEPLOYDIR"; virtualenv -p python3 ENV_projectname; cd - +echo "[log] - Finished creating virtualenv" + +echo "[log] - Activating virtualEnv" +cd "$DEPLOYDIR"; source ENV_projectname/bin/activate; cd - +echo "[log] - Finished activating virtualenv" + +echo "[log] - Pulling down pip dependencies" +cd "$DEPLOYDIR"; pip install -r requirements.txt; cd - +echo "[log] - Finished pulling down pip dependencies" + +echo "[log] - Staring DB migration" +cd "$DEPLOYDIR"; python manage.py makemigrations; python manage.py migrate; cd - +echo "[log] - Finished DB migration " + +echo "[log] - Pulling Node Dependencies" +cd "$DEPLOYDIR"; sudo npm install; cd - +echo "[log] - Finished Pulling Node Dependencies" + +echo "[log] - Building the Front end" +cd "$DEPLOYDIR"; sudo gulp build; cd - +echo "[log] - Finished building the Front end" + +echo "[log] - Collecting static assets" +cd "$DEPLOYDIR"; python manage.py collectstatic --clear --no-input; cd - +echo "[log] - Finished collecting static assets" + +echo "[log] - Restarting App" +sudo service myawesomedjangoapp restart; +echo "[log] - Finished collecting static assets" + +``` +_I run my Django Apps as systemd services, if you don't you can just call python +manage.py runserver. If you want to know how to setup Django the way I do just +follow this very comprehensive tutorial over on [Digital Ocean][1]_ + +## Conclusion + +I am fully aware that there are more sophisticated methods of deployment through +Docker, Travis (For continious integration) etc. 
But if you have a small app that +you want to ship and you already have an infrastructure, I've found this method +to be more than suitable. + +Please report any missing info, mistake, error, typo. I'm on [ twitter ][0] if you +wanna chat. + +[0]: https://twitter.com/aaqaishtyaq +[1]: https://www.digitalocean.com/community/tutorials/how-to-set-up-django-with-postgres-nginx-and-gunicorn-on-ubuntu-14-04 diff --git a/content/notes/2018-05-08-the-ultimate-setup-for-remote-development.md b/content/notes/2018-05-08-the-ultimate-setup-for-remote-development.md new file mode 100644 index 0000000..eecdafc --- /dev/null +++ b/content/notes/2018-05-08-the-ultimate-setup-for-remote-development.md @@ -0,0 +1,116 @@ ++++ +title = "The ultimate setup for remote development" +date = "2018-05-08" ++++ + +I'm a programmer. Like most programmers, I use an Apple laptop. They're just the +best on the market and the best purchase you can make as a developer. No +fanboyism here, just stating facts. They're fast, well built, and durable. + +I rely on this laptop for all my personal / client work. I run vagrant boxes, docker containers in it and all the usual stuff. + +As a result, I've developed this irrational fear that it would get stolen or +fall off my bedroom window (things like that happen, trust me). Interestingly +enough, it coincides with my growing interest for remote work. + +So I started to look for ways to create a development server that would allow me +to remain productive even if I lose / break my laptop, or when I'm on the road. + +## Why + +Ok, so you've read all this and you're thinking "How is it going to benefit me ?". 
+ +The main selling points to creating a remote development environment are the +following: + +* It's cheap +* You learn linux +* You become OS agnostic (I said no fanboyism) +* You can work from almost anywhere + + +## What you need + +### Laptop + +Because all of your work is now done on a remote machine you don't really have +to care about what computer you're using. It can be a super expensive (notice I +didn't say overpriced) Apple laptop or any of its really good windows competitors (the DELL xps 13/15 comes to mind) or even a super cheap, 35$ raspberry Pi. Some people even use chromebooks ! They stick ubuntu on them and use them as their primary machines (SSH is a bit tricky to setup on chrome OS) but hey! Official [Linux support](https://www.xda-developers.com/chromebooks-linux-app-support/) is coming for you ChromeOS folks. + +## Dev Tools ## + +### Git + +This one's obvious. While git is a life saver and a great tool for collaboration in large teams (distributed or not), you can also use it as a backup system for your code when you're a solo developer. + +### Vim + +I've used sublime text but felt bad for continuously extending the trial because I +couldn't pay for it. Then I switched back to VSCode and got frustrated by how slow it +runs after using Sublime. Then i tried vim, and I never +looked back. It's the lightest, fastest and overall best text editor out there by a huge +margin (in my opinion). + +Vim is highly customizable and lets you save your settings inside a +`.vimrc` file, which makes it version control friendly. It also makes your vim +environment 100% portable as it comes standard in most linux server distros. + +### Tmux + +The best companion to vim. Tmux is a terminal multiplexer. With Tmux you can +essentially access multiple terminal sessions inside a single window. This gives +you the ability to work on multiple projects at a time. You can even save +sessions, attach and reattach to them. It's painless. 
+Just like vim, it's extremely customizable. All of your settings can be stored +inside of a `.tmux.conf` file. + +### A VPS + +To start developing on a remote server, you'll need ... well, a remote +server. If you don't know what a VPS is, it stands for **Virtual Private Server**. +It's essentially a virtual machine that you pay for monthly, which runs a server +distribution of linux (Ubuntu, CentOS ...), or Unix (FreeBSD). You can connect to it via SSH (it has a public ip address) and start playing around. You have complete control over the server, you can configure it however way you want. + +There's a large range of VPS providers on the market, the most notable +ones are [Linode](https://www.linode.com/), [Digital Ocean](https://www.digitalocean.com), and [Amazon EC2](https://aws.amazon.com/ec2/). + +### Dotfiles + +As I previously mentioned above, you can save your settings for vim and tmux in +what are called **dotfiles** (files that start with a period). However dotfiles +are not exclusive to vim and tmux, you can for example save your shell configuration in a **.zshrc** or **.bashrc** config file, or your git settings in a **.gitconfig** file. + +Doing this is extremely powerful because you can store these configuration files +on github and always pull the latest version when you launch a new development server. + +You can check out my own [dotfiles](https://github.com/aaqaishtyaq/dotfiles) on github to help you get started. + +### (Optional) Ansible + +Manually setting up servers is fun for the first couple of times, then it just +becomes repetitive. And what do you do with repetitive tasks ? You automate +them. + +Ansible is a provisioning tool written in python that will help you do just that. +You give it the ip address (or addresses) of the server you want to configure and it will execute all the tasks you tell it to. 
+ +With Ansible you can: + +* run shell commands +* install packages +* create directories and files +* add users and groups +* clone git repos +* use templates and pass variables to them +* and much more + +If you don't like Ansible, there are other server provisioning tools like +[puppet](https://puppet.com/), [chef](https://www.chef.io/) and [salt](https://saltstack.com/) that will work just as good. + + + +## Caveats + +* it takes time to learn linux and networking (SSH etc) +* vim has a learning curve to it +* you become reliant on an internet connection diff --git a/content/notes/2018-05-09-the-ultimate-postgresql-cheatsheet.md b/content/notes/2018-05-09-the-ultimate-postgresql-cheatsheet.md new file mode 100644 index 0000000..cb46aed --- /dev/null +++ b/content/notes/2018-05-09-the-ultimate-postgresql-cheatsheet.md @@ -0,0 +1,576 @@ ++++ +title = "The ultimate PostgreSQL cheatsheet" +date = "2018-05-09" ++++ + +So I had been working with Django, Flask and Express.js for a while now, and my +database of choice for every single project has of course always been Postgres. +What makes these frameworks great (regardless of language) is the ability to use +an ORM (Object Relational Mapper) that sits between your code and the DB. +It does all the heavy lifting for you and takes care of executing SQL queries on +your behalf. + +This is great but I couldn't stop thinking "what if I have to +manually debug something directly into postgres ?" This is when reality slapped +me in the face, I barely knew the SQL Language. So I thought It'd be fun to +create a cheatsheet that I (and you) could keep as a reference for these times +when you absolutely need to set this column to UNIQUE and you don't know how. + +Enjoy ! 
(and yeah I went all in with the pokemon references) + +### Create a User (or Role) + +```sql +/* create a user without privileges*/ +CREATE ROLE aaqa; + +/* create a user with privileges*/ +CREATE ROLE aaqa LOGIN CREATEDB CREATEROLE REPLICATION; + +/* Add privileges to existing user*/ +ALTER ROLE aaqa WITH LOGIN CREATEROLE CREATEDB REPLICATION; +``` + + +### Make a user superuser (bump their privileges) + +```sql +ALTER ROLE aaqa WITH superuser; +``` + + +### Rename an existing user + +```sql +ALTER ROLE psyduck RENAME TO brock; +``` + + +### Create a DB + +```sql +CREATE DATABASE pokemons; +``` + + +### Rename a DB + +```sql +ALTER DATABASE pokemons RENAME TO charizard; +``` + + +### Create a table in a DB + +```sql +CREATE TABLE trainers( + + id INT PRIMARY KEY NOT NULL, + pokemon_type CHAR(50), + name CHAR(50) NOT NULL UNIQUE, + gender CHAR(50) NOT NULL UNIQUE +); +``` + + +### Delete a DB + +```sql +DROP DATABASE pokemons; +``` + + +### Delete a user + +```sql +/* assuming there's a pikachu role on the system */ +DROP ROLE pikachu; +``` + + +### Change DB ownership + +```sql +ALTER DATABASE pokemons OWNER TO aaqa; +``` + + +### Rename a table + +```sql +ALTER TABLE trainers RENAME TO gym_trainers; +``` + + +### Change column type + +```sql +ALTER TABLE gym_trainers ALTER COLUMN pokemon_type TYPE TEXT; /* it makes zero +sense to want to change this column type to TEXT but YOLO */ +``` + + +### Rename a column + +```sql +ALTER TABLE gym_trainers ALTER COLUMN name RENAME TO trainer_name; +``` + + +### Add a column to a table + +```sql +ALTER TABLE gym_trainers ADD COLUMN bio TEXT; + +/* +ALTER TABLE +ADD COLUMN +*/ +``` + + +### Add a column with a UNIQUE constraint + +```sql +ALTER TABLE gym_trainers ADD COLUMN age INT UNIQUE; /* cause why not */ +``` + + +### Add a column with a NOT NULL constraint + +```sql +ALTER TABLE gym_trainers ADD COLUMN main_pokemon CHAR(60) NOT NULL; +``` + +### Remove NOT NULL CONSTRAINT from a column + +```sql +ALTER TABLE 
gym_trainers ALTER COLUMN main_pokemon DROP NOT NULL; +``` + +### Add a column with a NOT NULL constraint and a DEFAULT value + +```sql +ALTER TABLE gym_trainers ADD COLUMN city CHAR(80) NOT NULL DEFAULT 'Indigo +Plateau'; +``` + + +### Add a column with a CHECK constraint + +```sql + +CREATE TABLE trainers( + + id INT PRIMARY KEY, + trainer_name CHAR(50) NOT NULL, + + /* + method 1 + Add the check yolo style + */ + age INT NOT NULL CHECK (age > 18) + + /* + method 2 + Add a named constraint for better error handling + */ + age INT CONSTRAINT legal_age CHECK (age > 18) NOT NULL + + /* + method 3 + Add the constraint at the end + for more clarity + */ + age INT NOT NULL, + CONSTRAINT legal_age CHECK (age > 18) NOT NULL +); +``` + + +### Add a CONSTRAINT to an existing column + +```sql +ALTER TABLE trainers ADD CONSTRAINT unique_name UNIQUE (trainer_name); +``` + +### Remove a named CONSTRAINT from a table + +```sql +ALTER TABLE trainers DROP CONSTRAINT unique_name; +``` + +### Insert a row into a table + +```sql +INSERT INTO trainers VALUES (1, 23, 'brock'); + +/* or */ + +INSERT INTO trainers (age, trainer_name) +VALUES (1, 23, 'brock'); + +/* + Note that in the second case we don't have pass + the id. Postgres will automatically generate and autoincrement + it for us. To omit the id column we must use named inserts otherwise an + error is raised. +*/ +``` + + +### Insert multiple rows into a table + +```sql + +INSERT INTO trainers (age, trainer_name) VALUES +(19, 'misty'), +(22, 'chen'), +/* ... */ +/* ... */ +``` + + +### Clear a table (without deleting it) + +```sql +TRUNCATE trainers; +``` + + +### Set the primary key type to a serial (An auto incrementing integer) + +```sql + +/* Considering this table structure */ + +CREATE TABLE pokemon_list ( + + id INT PRIMARY KEY, + /* ... */ + /* ... */ + /* ... */ +); + + +/* + 1. 
+ Create a sequence for the auto generating prinary key + It follows the tablename_columnname_seq +*/ +CREATE SEQUENCE pokemon_list_id_seq; + +/* 2. Set the id column to not null */ +ALTER TABLE pokemon_list ALTER COLUMN id SET NOT NULL; + +/* 3. Set the default value to the next value in the sequence*/ +ALTER TABLE pokemon_list +ALTER COLUMN pokemon_list +SET DEFAULT nextval('pokemon_list_id_seq'); + +/* 4. Link the sequence to the correct table and column */ +ALTER SEQUENCE pokemon_list_id_seq OWNED BY pokemon_list.id; +``` + + +### Import data from a file + +```sql +/* + Must use absolute path and the user must have appropriate permissions + Defaults to importing using TAB as the default parameter. + We'll use a csv file as an example +*/ + +COPY pokemon_list FROM '/path/to/yourfile.csv' DELIMITER ','; + +/* + This only works if a pk is specified for each row + The (my) prefered way to do it is the following +*/ + +COPY pokemon_list (name, level, type) FROM '/path/to/yourfile.csv' DELIMITER ','; + +/* It's much more flexible because you control what data you actually import*/ +``` + + +### Export a table to a file + +To be able to export a table to a file, we need to ensure that postgres has +write permissions to the file. + +```bash +sudo chmod 777 /path/to/directory # This is just an example, edit this as needed +``` + +We can now safely copy the table to the file. 
+ +```sql +COPY pokemon_list TO '/path/to/file.csv' DELIMITER ','; +``` + + +### Select columns by using aliases + +```sql +SELECT name AS pokemon_name, type AS pokemon_type +FROM pokemon_list; +``` + + +### Select elements based on a criteria + +```sql +SELECT * +FROM pokemon_list +WHERE id > 3; +``` + + +### Select elements based on string comparison + +```sql +SELECT * +FROM pokemon_list +WHERE type LIKE '%water%'; +``` + + +### Select all results and order them by id in reverse + +```sql +SELECT * +FROM pokemon_list +ORDER BY id DESC; +``` + +### Select all results and order them by a column name + +```sql +SELECT * +FROM pokemon_list +ORDER BY level; + +/* If the column you're ordering by is not of type INT then the ordering will be +done alphabetically */ +``` + +### Select DISTINCT column from table + +```sql +SELECT DISTINCT type AS pokemon_type +FROM pokemon_list; +``` + +### Limit the results from a SELECT query + +```sql +SELECT * +FROM pokemon_list +LIMIT 3; +``` + +### Select the last 3 items + +```sql +SELECT * +FROM pokemon_list +ORDER BY id DESC +LIMIT 3; +``` + +### Create two tables with a foreign key relationship + +```sql +CREATE TABLE pokemon_types( + + id SERIAL PRIMARY KEY, + type_name CHAR(120) NOT NULL +) + +CREATE TABLE pokemon_list( + + id serial PRIMARY KEY, + pokemon_name CHAR(120) NOT NULL, + pokemon_level INT NOT NULL, + pokemon_type INT REFERENCES pokemon_types(id) NOT NULL, + CONSTRAINT pokemon_level_not_zero CHECK (pokemon_level > 0) + +); +``` + +### Perform Joins based on a criteria + +```sql +SELECT name, level, pokemon_types.name AS type +FROM pokemon_list +JOIN pokemon_types +ON pokemon_type_id = pokemon_types.id +WHERE pokemon_type_id = 1; + +/* Will return the name, level and type name for all water pokemons */ +``` + + +### Perform joins + +```sql +SELECT name, level, pokemon_types.name AS type +FROM pokemon_list +JOIN pokemon_types +ON pokemon_type_id = pokemon_types.id; +``` + + +### Create a VIEW based on a JOIN + 
+```sql +CREATE VIEW pokemonswithtypes AS +SELECT name, level pokemon_types.name AS type +FROM pokemon_list +JOIN pokemon_types +ON pokemon_type_id = pokemon_types.id; + +/* To see the data */ + +SELECT * FROM pokemonswithtypes; +``` + + +### Update the VIEW (Change the query) + +```sql +SELECT name, pokemon_types.name AS type +FROM pokemon_list +JOIN pokemon_types +ON pokemon_type = pokemon_types.id +WHERE pokemon_type = 1; + +/* only show the name and type for water pokemons */ +``` + + +### Delete the VIEW + +```sql +DROP VIEW pokemonswithtypes; +``` + + +### Use aggregate functions (MIN, MAX, SUM, COUNT, AVG) + +```sql +/* MAX */ +SELECT MAX(pokemon_level) +FROM pokemon_list; + +/* MIN */ +SELECT MIN(pokemon_level) +FROM pokemon_list; + +/* AVG */ +SELECT AVG(pokemon_level) +FROM pokemon_list; + +/* ROUND */ +SELECT ROUND(AVG(pokemon_level)) +FROM pokemon_list; + +/* COUNT */ +SELECT COUNT(*) +FROM pokemon_list; + +/* SUM */ +SELECT SUM(pokemon_level) +FROM pokemon_list; +``` + + +### Use boolean aggregate functions + +```sql +/* Add a column is_legendary of type boolean to table pokemon_list */ +ALTER TABLE pokemon_list ADD COLUMN is_legendary BOOL NOT NULL DEFAULT TRUE; + +/* BOOL_AND + returns a result if **ALL** records have that column set to true +*/ + +SELECT BOOL_AND(is_legendary) FROM pokemon_list; + + +/* + BOOL_OR + returns a result if one or more records have that column set to true +*/ + +SELECT BOOL_OR(is_legendary) FROM pokemon_list; +``` + + +### Update a table and change all column values + +```sql +UPDATE pokemon_list +SET is_legendary = FALSE; +``` + + +### Update a table and change value based on a criteria + +```sql +UPDATE pokemon_list +SET is_legendary = TRUE +WHERE id = 2; +``` + + +### Delete row with specific id + +```sql +DELETE FROM pokemon_list WHERE id = 4 +``` + + +### Delete rows withing a range of ids + +```sql +DELETE FROM pokemon_list +WHERE id BETWEEN 1 AND 4; +``` + + +### Delete all rows + +```sql +DELETE FROM pokemon_list; 
+``` +_Note: The difference between DELETE and DROP or TRUNCATE is that the former can +be undone (rolled back) the latter can't_ + + +### Alter a table to drop a CONSTRAINT if it exist + +```sql +ALTER TABLE pokemon_types +DROP CONSTRAINT IF EXISTS unique_type_name; +``` + +### Comments + +```sql +COMMENT ON TABLE pokemon_types is 'pokemon with types' + +/* To display the comment, in psql simply run \dt+. It will return a description +column containing that comment. It's useful when working on a legacy database +for example*/ + +/* Please note that comments aren't exclusive to tables, they can be executed on +schemas and multiple other objects.*/ +``` + +*Note: If you find errors, typos or would like to add new tips, feel free to +reach out to me on twitter. I'm [@aaqaishtyaq](https://twitter.com/aaqaishtyaq). Thank +you for reading ! And if you find this useful, share it with your friends and +coworkers !* diff --git a/content/notes/2018-06-02-build-deploy-react-app-with-nginx.md b/content/notes/2018-06-02-build-deploy-react-app-with-nginx.md new file mode 100644 index 0000000..aeb0c2d --- /dev/null +++ b/content/notes/2018-06-02-build-deploy-react-app-with-nginx.md @@ -0,0 +1,261 @@ ++++ +title = "Deploy a React app with sass using Nginx" +date = "2018-06-02" ++++ + +A couple of days ago (at the time of writing), I started my newest side +project. It's a portfolio showcasing my (very very very) amateur +photography. It's written in React.js with Sass and I have to say it was +extremely enjoyable to work on. Unsurprisingly though, I ran into some issues while +deploying to production, which after a lot of head banging against every +possible flat surface I could find, I managed to sort. So this post will be about +how to make React.js work with sass in production and how to serve the project +using Nginx as a front end web server. + +We'll be using the official starter kit / CLI tool provided by the facebook +team called `create-react-app`. 
+ +You can install it by running the following command: + +```bash +npm install -g create-react-app +``` + +## Adding Sass to a React project + +Including sass in a React app can be done in two ways: You can either eject the +project and manually modify the webpack config files or you can follow the +procedure in the [official docs][1]. I've personally chosen to go with the first approach. +You can just follow the steps in this [tutorial][2] to get up and running. + +_Note: If you went with the second approach, you can just skip the following and +directly jump to the next section_ + +Once you've done that, open the config/webpack.config.prod.js file, locate the +`rules` section under `module.exports` and add the following snippet to it. + +```javascript +{ + test: /\.sass$/, + use: ExtractTextPlugin.extract({ + fallback: require.resolve("style-loader"), + use: [require.resolve("css-loader"), require.resolve("sass-loader")] + }), + include: paths.appSrc, +} +``` +This config will be executed when you call `npm run build`. If you don't do +this, you'll end up with an empty css file. + +## Important note + +Before building the project for the first time, we must first unregister the +service worker. Doing this will ensure that the static assets aren't cached by +the client's browsers. It's important because if you skip this step, any +subsequent change / build you'll deploy won't necessarily be reflected right away +client side. (I'm not knowledgable enough on service workers to provide details +on this behaviour, but it's something to note. If you really need service +workers in your project, you might want to explore other solutions to avoid +asset caching). + +Your index.js file should now look something like this: + +```javascript +import { unregister } from './registerServiceWorker' +import App from './App'; +ReactDOM.render(, document.getElementById('root')); +unregister() +``` + +## Building the project + +This is the part where we actually build our project. 
By building I mean +compiling all React files and their related dependencies, transpiling them into +vanilla Javascript, compiling sass files into css, minifying them etc ... +To do this, we can simply run `npm run build` and voila ! You should have a +brand new `build` folder in your project. + +## Creating our deploy script + +Your app is now compiled and ready to be served. Now is a good time to start +thinking about deployment strategies. In this basic scenario +(and quite frankly, most scenraios), you really just want to upload the build +folder to a remote server and have it accessible as a static site. + +You can use ftp to transfer the files and it would be perfectly acceptable, but +it's not the most flexible solution. The alternative is to use a CLI utility called +`rsync` (which is available on mac and linux, not sure about windows). With +rsync, you can synchronise files and folders within the same computer or across +machines you have ssh access to. + +Here's the command we would run to synchronise the build folder to a server +on the internet: + +```bash +# Asssuming we're inside the project folder +rsync -avP build/ username@remote_ip:/destination/path +``` + +Let's break down this command: + +`-a` means archive, which is a shortcut for multiple switches. It +recursively syncs all files and subfolders within `build` to the +destnation path, keeping the modification dates, permissions and other metadata +unchanged. + +`-v` means verbose. It just outputs the steps to the screen so you can see +what happens in real time. + +`-P` stands for progress. This is particularly useful in this case because you +rely on a network connection to sync the files. Using this option will +display a progress bar for each file in the queue. + +But you don't want to keep doing all of that every time we want to push now do +you ? + +Thankfully, you can use create a bash script to automate this process a litte +bit. 
Here's how mine looks like: + +```bash +#!/bin/sh + +echo "[log] - Merging branch to master" +git checkout master && git merge develop && git push origin master +echo "[log] - Merge completed" + +echo "[log] - Compiling project to build folder ..." +npm run build +echo "[log] - Build process done" + +echo "[log] - Deploying files to server" +rsync -avP build/ user@host:/destination/path +echo "[log] - Deployment completed" + +echo "[log] - Switching to develop" +git checkout develop +echo "[log] - Done!" +``` + +Again, let's walk through that script section by section: + +1. I checkout to master, merge develop and push. This ensures that my master + branch is always up to date with the latest version of my working codebase. +2. I execute `npm run build` which, as previously explained, will create the + build directory with our compiled, ready to be deployed files. +3. I use rsync to copy over the contents of the build folder to the destination + path in the remote machine I administer. (notice the trailing slash after + build/, this tells rsync to copy the contents of the folder and not the + folder itself). +4. I switch the current working branch back to develop so that I can start + developing without accidentally altering the state of `master`. + +Obviously this is very basic and in a more complex project, you'd have to run +unit tests and do other things your project requires. + +Finally, you need to give executable permissions to the file by running: + +```bash +sudo chmod +x deploy.sh +``` + +Now all you have to do when you want to deploy your project to production is run + +```bash +./deploy.sh +``` + +## Serving our site with Nginx + +*Note: this assumes your server is running ubuntu or any other debian based +distro* + +Ok, so if you've followed the steps correctly, you should have your project +files uploaded to your remote server. Now we need to use Nginx to make the site +accessible to the internet. 
+ +First, create a new config file inside `/etc/nginx/sites-available`. + +```bash +cd /etc/nginx/sites-available +touch mywebsiteconfig # File extension is not required in this case +``` +Next step is to edit the file using either vim or nano (sudo privileges may be +required). + +```nginx +server { + + listen 80; + server_name mywebsite.com www.mywebsite.com; + + location / { + root /path/to/your/project; + index index.html index.htm; + + default_type "text/html"; + } + + access_log /var/log/nginx/mywebsite_access.log; + error_log /var/log/nginx/mywebsite_errors.log; +} +``` + +If you're not familiar with Nginx, let me explain what you just copied. + +1. We created a server block to hold our configuration and keep it separate from +other configs we may add in the future (like https for example). +2. We declare the listen directive which tells nginx to listen on port 80. +3. We set the server\_name to our domain name. This tells nginx to apply the +config settings to any incoming request from any of listed urls. +4. Finally, we specify the paths to both the access and error logs. It's + optional but highly recommended, so that you know exactly where to look when + errors happen. This will save you a tonne of time when troubleshooting issues + in the future. + +Close the file and exit nano or vim. + +Nginx keeps its configuration files in two separate directories: +`/etc/nginx/sites-available` and `/etc/nginx/sites-enabled`. It will serve +any website whose configuration file is in the latter folder. + +All you have to do now is create a symlink (think of it like a shortcut to an app +in a desktop GUI) to your config, and store it in `sites-enabled`. +That way, if you ever decide to shut down the site, you'll simply need to +delete the symlink and you're good. + +Before creating the symlink, it's good measure to check if the configuration +file has any errors in it. 
To check for errors just run the following command: + +```bash +sudo nginx -t +``` + +It will scan all of your config files and check for errors (and return them to +you if there are any). + +Now you can create the symlink by executing: + +```bash +sudo ln -s /etc/nginx/sites-available/mywebsite /etc/nginx/sites-enabled +``` + +_Note: If you decide to use service workers with your project and you're still +running into caching issues, you should know that Nginx can also be used as a +static assets server. I haven't looked into this scenario yet. I might test that +approach in the future and detail the process in another post._ + +Lastly, reload nginx and your website should now be accessible via its URL +(provided you correctly setup the DNS settings with your domain name registrar) + +```bash +sudo service nginx reload +``` + +If you read this article all the way through, thanks for sticking with me ! You +can send me questions, remarks, or comments on twitter, I'm [@aaqaishtyaq][3] on +twitter. + +[0]: http://example.com +[1]: https://github.com/facebookincubator/create-react-app/#getting-started +[2]: https://medium.com/@Connorelsea/using-sass-with-create-react-app-7125d6913760 +[3]: https://twitter.com/aaqaishtyaq diff --git a/content/notes/2018-09-01-scrapy-splash-setup.md b/content/notes/2018-09-01-scrapy-splash-setup.md new file mode 100644 index 0000000..5fe1dff --- /dev/null +++ b/content/notes/2018-09-01-scrapy-splash-setup.md @@ -0,0 +1,115 @@ ++++ +title = "Setting up Scrapy Splash Plugin" +date = "2018-09-01" ++++ + +Scrapy is good for scraping static web pages using python but when it comes to dynamic web pages scrapy can't do wonders, and there comes ```Selenium``` but as good as selenium is, it just got beaten by Scrapy in terms or speed. + +Web nowdays is all about Dynamic JS based pages and AJAX. So for this very scenario the guys over [scrapy-plugins][0] created ```scrapy-splash```. 
+Scrapy-Splash is a plugin that connects Scrapy with Splash (Lightweight, scriptable browser as a service with an HTTP API).
+In a nutshell, what Splash does is trap the response received from the server and render it. Then it returns a ```render.html``` which is static and can be easily scraped.
+
+## 0 - Setting up the machine
+
+A. Before we begin you need to install ```Docker``` first. You can follow the [official instructions][1] as per your Operating System.
+
+B. After installing Docker, navigate to your project folder, activate ```virtualenv``` and install the scrapy-splash plugin:
+
+```bash
+pip3 install scrapy-splash
+```
+
+C. Pull the Splash Docker Image and run it:
+
+```bash
+docker pull scrapinghub/splash
+docker run -p 8050:8050 scrapinghub/splash
+```
+
+## 1 - Configuration
+
+A. Add the Splash server address to ```settings.py``` of your Scrapy project like this:
+
+```python
+SPLASH_URL = 'http://localhost:8050'
+```
+If you are running Docker on your local machine then you can simply use ```http://localhost:8050```, but if you are running it on a remote machine you need to specify its IP address like this: ```http://192.168.59.103:8050```
+
+
+B. Enable the Splash middleware by adding it to ```DOWNLOADER_MIDDLEWARES``` in your ```settings.py``` file and changing HttpCompressionMiddleware priority:
+
+```python
+DOWNLOADER_MIDDLEWARES = {
+    'scrapy_splash.SplashCookiesMiddleware': 723,
+    'scrapy_splash.SplashMiddleware': 725,
+    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
+}
+```
+
+
+C. Enable SplashDeduplicateArgsMiddleware by adding it to SPIDER_MIDDLEWARES in your settings.py:
+
+```python
+SPIDER_MIDDLEWARES = {
+    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
+}
+```
+
+
+D. Set a custom DUPEFILTER_CLASS:
+
+```python
+DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
+```
+
+## 2 - Scraping with Splash
+
+Before you use ```scrapy-splash``` you need to import it in your spider.
+You can do that by adding this line:
+
+```python
+from scrapy_splash import SplashRequest
+```
+
+From now on, instead of using ```scrapy.Request``` you can simply use ```SplashRequest``` to get the response from ```Splash``` instead of directly from the server.
+
+## Bonus - Using Scrapy-Splash in Shell
+
+It's all well and good but actual spider building does not happen in ```vim``` or ```sublime```, it takes place in the ```shell```.
+
+**So how to use Splash in the shell?**
+
+Good question.
+
+Instead of invoking the shell with:
+
+```bash
+scrapy shell
+>>> fetch('http://domain.com/page-with-javascript.html')
+```
+or with this:
+
+```bash
+scrapy shell http://domain.com/page-with-javascript.html
+```
+
+**You invoke the shell with this**:
+
+```bash
+scrapy shell 'http://localhost:8050/render.html?url=http://domain.com/page-with-javascript.html&timeout=10&wait=0.5'
+```
+
+Let me explain:
+
+* ```localhost:port``` is where your Splash service is running
+* ```url``` is the URL you want to crawl
+* ```render.html``` is one of the possible HTTP API endpoints; it returns the rendered HTML page in this case
+* ```timeout``` is the timeout in seconds
+* ```wait``` is the time in seconds to wait for JavaScript to execute before reading/saving the HTML.
+
+
+If I’ve missed something, made a horrible mistake, or if you have any questions regarding this article then feel free to ping me on Twitter. I’m
+[@aaqaishtyaq](https://twitter.com/aaqaishtyaq).
+
+[0]: https://github.com/scrapy-plugins
+[1]: https://docs.docker.com/install/
diff --git a/content/notes/2022-07-29-git-exclude-files.md b/content/notes/2022-07-29-git-exclude-files.md
new file mode 100644
index 0000000..5b05db9
--- /dev/null
+++ b/content/notes/2022-07-29-git-exclude-files.md
@@ -0,0 +1,33 @@
++++
+title = "Git exclude files from working copy"
+date = "2022-07-29"
++++
+
+## How to ignore new files
+
+### Globally
+
+Add the path(s) to your file(s) which you would like to ignore to your `.gitignore` file (and commit them).
These file entries will also apply to others checking out the repository.
+
+### Locally
+
+Add the path(s) to your file(s) which you would like to ignore to your `.git/info/exclude` file. These file entries will only apply to your local working copy.
+
+## How to ignore changed files (temporarily)
+
+In order to keep changed files from being listed as modified, you can use the following git command:
+
+```console
+git update-index --assume-unchanged <file>
+```
+
+To revert that, use the following command:
+
+```console
+git update-index --no-assume-unchanged <file>
+```
+
+*Note: If you find errors, typos, or would like to add new tips, feel free to
+reach out to me on Twitter. I'm [@aaqaishtyaq](https://twitter.com/aaqaishtyaq). Thank
+you for reading! And if you find this useful, share it with your friends and
+coworkers!*
diff --git a/content/notes/2023-01-26-linux-container-networking.md b/content/notes/2023-01-26-linux-container-networking.md
index 5d69aba..7056a9e 100644
--- a/content/notes/2023-01-26-linux-container-networking.md
+++ b/content/notes/2023-01-26-linux-container-networking.md
@@ -2,10 +2,9 @@
 title = "Linux Container Networking from Scratch"
 date = "2023-01-26"
 +++
+In this article, we will be looking into setting up networking on a Linux box from scratch.
-I will be using this [iximiuz post](https://iximiuz.com/en/posts/container-networking-is-simple/) as the reference for this article series.
-
-We will be creating a fresh new VM using lima on macOS.
+We will be creating a fresh new VM using Lima on macOS.
 You can create a new VM using VirtualBox, Vagrant, or even create a VM on OCI for free.

## Isolation based on Network Namespace