diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..18e85f09f --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +github: [AzorianSolutions] \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..550f15e21 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,7 @@ +--- +# Reference: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser +blank_issues_enabled: false +contact_links: + - name: 📖 Project Update - PLEASE READ! + url: https://github.com/PowerDNS-Admin/PowerDNS-Admin/discussions/1708 + about: "Important information about the future of this project" diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..05a661177 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,14 @@ + +### Fixes: #1234 + + \ No newline at end of file diff --git a/.github/SUPPORT.md b/.github/SUPPORT.md new file mode 100644 index 000000000..e0df5a649 --- /dev/null +++ b/.github/SUPPORT.md @@ -0,0 +1,15 @@ +# PowerDNS Admin + +## Project Support + +**Looking for help?** PDA has a somewhat active community of fellow users that may be able to provide assistance. +Just [start a discussion](https://github.com/PowerDNS-Admin/PowerDNS-Admin/discussions/new) right here on GitHub! + +Looking to chat with someone? Join our [Discord Server](https://discord.powerdnsadmin.org). + +Some general tips for engaging here on GitHub: + +* Register for a free [GitHub account](https://github.com/signup) if you haven't already. +* You can use [GitHub Markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) for formatting text and adding images. 
+* To help mitigate notification spam, please avoid "bumping" issues with no activity. (To vote an issue up or down, use a :thumbsup: or :thumbsdown: reaction.) +* Please avoid pinging members with `@` unless they've previously expressed interest or involvement with that particular issue. diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..482207e3b --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,23 @@ +--- +version: 2 +updates: + - package-ecosystem: npm + target-branch: dev + directory: / + schedule: + interval: daily + ignore: + - dependency-name: "*" + update-types: [ "version-update:semver-major" ] + labels: + - 'feature / dependency' + - package-ecosystem: pip + target-branch: dev + directory: / + schedule: + interval: daily + ignore: + - dependency-name: "*" + update-types: [ "version-update:semver-major" ] + labels: + - 'feature / dependency' diff --git a/.github/labels.yml b/.github/labels.yml new file mode 100644 index 000000000..e17cd97d8 --- /dev/null +++ b/.github/labels.yml @@ -0,0 +1,98 @@ +--- +labels: + - name: bug / broken-feature + description: Existing feature malfunctioning or broken + color: 'd73a4a' + - name: bug / security-vulnerability + description: Security vulnerability identified with the application + color: 'd73a4a' + - name: docs / discussion + description: Documentation change proposals + color: '0075ca' + - name: docs / request + description: Documentation change request + color: '0075ca' + - name: feature / dependency + description: Existing feature dependency + color: '008672' + - name: feature / discussion + description: New or existing feature discussion + color: '008672' + - name: feature / request + description: New feature or enhancement request + color: '008672' + - name: feature / update + description: Existing feature modification + color: '008672' + - name: help / deployment + description: Questions regarding application deployment + color: 'd876e3' + - name: help / 
features + description: Questions regarding the use of application features + color: 'd876e3' + - name: help / other + description: General questions not specific to application deployment or features + color: 'd876e3' + - name: mod / accepted + description: This request has been accepted + color: 'e5ef23' + - name: mod / announcement + description: This is an admin announcement + color: 'e5ef23' + - name: mod / change-request + description: Used by internal developers to indicate a change-request. + color: 'e5ef23' + - name: mod / changes-requested + description: Changes have been requested before proceeding + color: 'e5ef23' + - name: mod / duplicate + description: This issue or pull request already exists + color: 'e5ef23' + - name: mod / good-first-issue + description: Good for newcomers + color: 'e5ef23' + - name: mod / help-wanted + description: Extra attention is needed + color: 'e5ef23' + - name: mod / invalid + description: This doesn't seem right + color: 'e5ef23' + - name: mod / rejected + description: This request has been rejected + color: 'e5ef23' + - name: mod / reviewed + description: This request has been reviewed + color: 'e5ef23' + - name: mod / reviewing + description: This request is being reviewed + color: 'e5ef23' + - name: mod / stale + description: This request has gone stale + color: 'e5ef23' + - name: mod / tested + description: This has been tested + color: 'e5ef23' + - name: mod / testing + description: This is being tested + color: 'e5ef23' + - name: mod / wont-fix + description: This will not be worked on + color: 'e5ef23' + - name: skill / database + description: Requires a database skill-set + color: '5319E7' + - name: skill / docker + description: Requires a Docker skill-set + color: '5319E7' + - name: skill / documentation + description: Requires a documentation skill-set + color: '5319E7' + - name: skill / html + description: Requires a HTML skill-set + color: '5319E7' + - name: skill / javascript + description: Requires a 
JavaScript skill-set + color: '5319E7' + - name: skill / python + description: Requires a Python skill-set + color: '5319E7' \ No newline at end of file diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index a1b7aa1b2..000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,19 +0,0 @@ -# Number of days of inactivity before an issue becomes stale -daysUntilStale: 60 -# Number of days of inactivity before a stale issue is closed -daysUntilClose: 7 -# Issues with these labels will never be considered stale -exemptLabels: - - pinned - - security - - enhancement - - feature request -# Label to use when marking an issue as stale -staleLabel: wontfix -# Comment to post when marking an issue as stale. Set to `false` to disable -markComment: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank you - for your contributions. -# Comment to post when closing a stale issue. Set to `false` to disable -closeComment: true diff --git a/.github/workflows/build-and-publish.yml b/.github/workflows/build-and-publish.yml index 4b103f8ce..928386035 100644 --- a/.github/workflows/build-and-publish.yml +++ b/.github/workflows/build-and-publish.yml @@ -1,54 +1,79 @@ +--- +name: 'Docker Image' + on: + workflow_dispatch: push: branches: + - 'dev' - 'master' tags: - 'v*.*.*' + paths-ignore: + - .github/** + - deploy/** + - docker-test/** + - docs/** + - .dockerignore + - .gitattributes + - .gitignore + - .lgtm.yml + - .whitesource + - .yarnrc + - docker-compose.yml + - docker-compose-test.yml + - LICENSE + - README.md + - SECURITY.md jobs: build-and-push-docker-image: - name: Build Docker image and push to repositories + name: Build Docker Image runs-on: ubuntu-latest steps: - - name: Checkout code + - name: Repository Checkout uses: actions/checkout@v2 - - name: Docker meta + - name: Docker Image Metadata id: meta uses: docker/metadata-action@v3 with: images: | - 
ngoduykhanh/powerdns-admin + powerdnsadmin/pda-legacy tags: | type=ref,event=tag type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} - - name: Set up Docker Buildx + - name: QEMU Setup + uses: docker/setup-qemu-action@v2 + + - name: Docker Buildx Setup id: buildx uses: docker/setup-buildx-action@v1 - - name: Login to DockerHub + - name: Docker Hub Authentication uses: docker/login-action@v1 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + username: ${{ secrets.DOCKERHUB_USERNAME_V2 }} + password: ${{ secrets.DOCKERHUB_TOKEN_V2 }} - - name: Build latest image - uses: docker/build-push-action@v2 - if: github.ref == 'refs/heads/master' + - name: Docker Image Build + uses: docker/build-push-action@v4 with: + platforms: linux/amd64,linux/arm64 context: ./ file: ./docker/Dockerfile push: true - tags: ngoduykhanh/powerdns-admin:latest + tags: powerdnsadmin/pda-legacy:${{ github.ref_name }} - - name: Build release image - uses: docker/build-push-action@v2 + - name: Docker Image Release Tagging + uses: docker/build-push-action@v4 if: ${{ startsWith(github.ref, 'refs/tags/v') }} with: + platforms: linux/amd64,linux/arm64 context: ./ file: ./docker/Dockerfile push: true diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 000000000..9f4b66fc1 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,134 @@ +--- +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. 
+# +name: "CodeQL" + +on: + workflow_dispatch: + push: + branches: + - 'dev' + - 'main' + - 'master' + - 'dependabot/**' + - 'feature/**' + - 'issue/**' + paths-ignore: + - .github/** + - deploy/** + - docker/** + - docker-test/** + - docs/** + - powerdnsadmin/static/assets/** + - powerdnsadmin/static/custom/css/** + - powerdnsadmin/static/img/** + - powerdnsadmin/swagger-spec.yaml + - .dockerignore + - .gitattributes + - .gitignore + - .lgtm.yml + - .whitesource + - .yarnrc + - docker-compose.yml + - docker-compose-test.yml + - LICENSE + - package.json + - README.md + - requirements.txt + - SECURITY.md + - yarn.lock + pull_request: + # The branches below must be a subset of the branches above + branches: + - 'dev' + - 'main' + - 'master' + - 'dependabot/**' + - 'feature/**' + - 'issue/**' + paths-ignore: + - .github/** + - deploy/** + - docker/** + - docker-test/** + - docs/** + - powerdnsadmin/static/assets/** + - powerdnsadmin/static/custom/css/** + - powerdnsadmin/static/img/** + - powerdnsadmin/swagger-spec.yaml + - .dockerignore + - .gitattributes + - .gitignore + - .lgtm.yml + - .whitesource + - .yarnrc + - docker-compose.yml + - docker-compose-test.yml + - LICENSE + - package.json + - README.md + - requirements.txt + - SECURITY.md + - yarn.lock + schedule: + - cron: '45 2 * * 2' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'javascript', 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
+ + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml new file mode 100644 index 000000000..cf6f0b338 --- /dev/null +++ b/.github/workflows/lock.yml @@ -0,0 +1,24 @@ +--- +# lock-threads (https://github.com/marketplace/actions/lock-threads) +name: 'Lock threads' + +on: + schedule: + - cron: '0 3 * * *' + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + +jobs: + lock: + runs-on: ubuntu-latest + steps: + - uses: dessant/lock-threads@v3 + with: + issue-inactive-days: 90 + pr-inactive-days: 30 + issue-lock-reason: 'resolved' + exclude-any-issue-labels: 'bug / security-vulnerability, mod / announcement, mod / accepted, mod / reviewing, mod / testing' + exclude-any-pr-labels: 'bug / security-vulnerability, mod / announcement, mod / accepted, mod / reviewing, mod / testing' \ No newline at end of file diff --git a/.github/workflows/mega-linter.yml b/.github/workflows/mega-linter.yml new file mode 100644 index 000000000..fa29545f0 --- /dev/null +++ b/.github/workflows/mega-linter.yml @@ -0,0 +1,92 @@ +--- +# MegaLinter GitHub Action configuration file +# More info at https://megalinter.io +name: MegaLinter + +on: + workflow_dispatch: + push: + branches-ignore: + - "*" + - "dev" + - "main" + - "master" + - "dependabot/**" + - "feature/**" + - "issues/**" + - "release/**" + +env: # Comment env block if you do not want to apply fixes + # Apply linter fixes configuration + APPLY_FIXES: all # When active, APPLY_FIXES must also be defined as environment variable (in github/workflows/mega-linter.yml or other CI tool) + APPLY_FIXES_EVENT: all # Decide which event triggers application of fixes in a commit or a PR (pull_request, push, all) + APPLY_FIXES_MODE: pull_request # If APPLY_FIXES is used, defines if the fixes are directly committed (commit) or 
posted in a PR (pull_request) + +concurrency: + group: ${{ github.ref }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + build: + name: MegaLinter + runs-on: ubuntu-latest + steps: + # Git Checkout + - name: Checkout Code + uses: actions/checkout@v3 + with: + token: ${{ secrets.PAT || secrets.GITHUB_TOKEN }} + + # MegaLinter + - name: MegaLinter + id: ml + # You can override MegaLinter flavor used to have faster performances + # More info at https://megalinter.io/flavors/ + uses: oxsecurity/megalinter@v6 + env: + # All available variables are described in documentation + # https://megalinter.io/configuration/ + VALIDATE_ALL_CODEBASE: true # Validates all source when push on main, else just the git diff with main. Override with true if you always want to lint all sources + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PAT: ${{ secrets.PAT }} + # ADD YOUR CUSTOM ENV VARIABLES HERE OR DEFINE THEM IN A FILE .mega-linter.yml AT THE ROOT OF YOUR REPOSITORY + # DISABLE: COPYPASTE,SPELL # Uncomment to disable copy-paste and spell checks + + # Upload MegaLinter artifacts + - name: Archive production artifacts + if: ${{ success() }} || ${{ failure() }} + uses: actions/upload-artifact@v3 + with: + name: MegaLinter reports + path: | + megalinter-reports + mega-linter.log + + # Create pull request if applicable (for now works only on PR from same repository, not from forks) + - name: Create PR with applied fixes + id: cpr + if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'pull_request' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) + uses: peter-evans/create-pull-request@v4 + with: + token: ${{ secrets.PAT || secrets.GITHUB_TOKEN }} + commit-message: "[MegaLinter] Apply linters automatic fixes" + title: "[MegaLinter] Apply linters automatic fixes" + labels: bot + + - name: Create PR output + if: 
steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'pull_request' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) + run: | + echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" + echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" + + # Push new commit if applicable (for now works only on PR from same repository, not from forks) + - name: Prepare commit + if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'commit' && github.ref != 'refs/heads/main' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) + run: sudo chown -Rc $UID .git/ + - name: Commit and push applied linter fixes + if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'commit' && github.ref != 'refs/heads/main' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) + uses: stefanzweifel/git-auto-commit-action@v4 + with: + branch: ${{ github.event.pull_request.head.ref || github.head_ref || github.ref }} + commit_message: "[MegaLinter] Apply linters fixes" + diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000..9b565ec09 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,46 @@ +# close-stale-issues (https://github.com/marketplace/actions/close-stale-issues) +name: 'Close Stale Threads' + +on: + schedule: + - cron: '0 4 * * *' + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + +jobs: + stale: + + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v6 + with: + close-issue-message: > + This issue has been automatically 
closed due to lack of activity. In an + effort to reduce noise, please do not comment any further. Note that the + core maintainers may elect to reopen this issue at a later date if deemed + necessary. + close-pr-message: > + This PR has been automatically closed due to lack of activity. + days-before-stale: 90 + days-before-close: 30 + exempt-issue-labels: 'bug / security-vulnerability, mod / announcement, mod / accepted, mod / reviewing, mod / testing' + operations-per-run: 100 + remove-stale-when-updated: false + stale-issue-label: 'mod / stale' + stale-issue-message: > + This issue has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. PDA + is governed by a small group of core maintainers which means not all opened + issues may receive direct feedback. **Do not** attempt to circumvent this + process by "bumping" the issue; doing so will result in its immediate closure + and you may be barred from participating in any future discussions. Please see our + [Contribution Guide](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/docs/CONTRIBUTING.md). + stale-pr-label: 'mod / stale' + stale-pr-message: > + This PR has been automatically marked as stale because it has not had + recent activity. It will be closed automatically if no further action is + taken. Please see our + [Contribution Guide](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/docs/CONTRIBUTING.md). 
\ No newline at end of file diff --git a/.gitignore b/.gitignore index 539325fd0..91bfea922 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +flask_session + # gedit *~ diff --git a/LICENSE b/LICENSE index 6f51774bf..8375c2ed5 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,7 @@ The MIT License (MIT) Copyright (c) 2016 Khanh Ngo - ngokhanhit[at]gmail.com +Copyright (c) 2022 Azorian Solutions - legal[at]azorian.solutions Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 8067855f8..4435c7f7e 100644 --- a/README.md +++ b/README.md @@ -1,49 +1,67 @@ # PowerDNS-Admin + A PowerDNS web interface with advanced features. -[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/PowerDNS-Admin/PowerDNS-Admin.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/PowerDNS-Admin/PowerDNS-Admin/context:python) -[![Language grade: JavaScript](https://img.shields.io/lgtm/grade/javascript/g/PowerDNS-Admin/PowerDNS-Admin.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/PowerDNS-Admin/PowerDNS-Admin/context:javascript) +[![CodeQL](https://github.com/PowerDNS-Admin/PowerDNS-Admin/actions/workflows/codeql-analysis.yml/badge.svg?branch=master)](https://github.com/PowerDNS-Admin/PowerDNS-Admin/actions/workflows/codeql-analysis.yml) +[![Docker Image](https://github.com/PowerDNS-Admin/PowerDNS-Admin/actions/workflows/build-and-publish.yml/badge.svg?branch=master)](https://github.com/PowerDNS-Admin/PowerDNS-Admin/actions/workflows/build-and-publish.yml) #### Features: -- Multiple domain management -- Domain template -- User management -- User access management based on domain -- User activity logging -- Support Local DB / SAML / LDAP / Active Directory user authentication -- Support Google / Github / Azure / OpenID OAuth -- Support Two-factor authentication (TOTP) -- Dashboard and pdns service statistics + +- Provides 
forward and reverse zone management +- Provides zone templating features +- Provides user management with role based access control +- Provides zone specific access control +- Provides activity logging +- Authentication: + - Local User Support + - SAML Support + - LDAP Support: OpenLDAP / Active Directory + - OAuth Support: Google / GitHub / Azure / OpenID +- Two-factor authentication support (TOTP) +- PDNS Service Configuration & Statistics Monitoring - DynDNS 2 protocol support -- Edit IPv6 PTRs using IPv6 addresses directly (no more editing of literal addresses!) -- Limited API for manipulating zones and records -- Full IDN/Punycode support +- Easy IPv6 PTR record editing +- Provides an API for zone and record management among other features +- Provides full IDN/Punycode support + +## [Project Update - PLEASE READ!!!](https://github.com/PowerDNS-Admin/PowerDNS-Admin/discussions/1708) ## Running PowerDNS-Admin -There are several ways to run PowerDNS-Admin. The easiest way is to use Docker. -If you are looking to install and run PowerDNS-Admin directly onto your system check out the [Wiki](https://github.com/PowerDNS-Admin/PowerDNS-Admin/wiki#installation-guides) for ways to do that. + +There are several ways to run PowerDNS-Admin. The quickest way is to use Docker. +If you are looking to install and run PowerDNS-Admin directly onto your system, check out +the [wiki](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/docs/wiki/) for ways to do that. ### Docker -This are two options to run PowerDNS-Admin using Docker. -To get started as quickly as possible try option 1. If you want to make modifications to the configuration option 2 may be cleaner. + +Here are two options to run PowerDNS-Admin using Docker. +To get started as quickly as possible, try option 1. If you want to make modifications to the configuration option 2 may +be cleaner. 
#### Option 1: From Docker Hub -The easiest is to just run the latest Docker image from Docker Hub: + +To run the application using the latest stable release on Docker Hub, run the following command: + ``` $ docker run -d \ -e SECRET_KEY='a-very-secret-key' \ -v pda-data:/data \ -p 9191:80 \ - ngoduykhanh/powerdns-admin:latest + powerdnsadmin/pda-legacy:latest ``` -This creates a volume called `pda-data` to persist the SQLite database with the configuration. + +This creates a volume named `pda-data` to persist the default SQLite database with app configuration. #### Option 2: Using docker-compose + 1. Update the configuration Edit the `docker-compose.yml` file to update the database connection string in `SQLALCHEMY_DATABASE_URI`. - Other environment variables are mentioned in the [legal_envvars](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/configs/docker_config.py#L5-L46). - To use the Docker secrets feature it is possible to append `_FILE` to the environment variables and point to a file with the values stored in it. - Make sure to set the environment variable `SECRET_KEY` to a long random string (https://flask.palletsprojects.com/en/1.1.x/config/#SECRET_KEY) + Other environment variables are mentioned in + the [AppSettings.defaults](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/powerdnsadmin/lib/settings.py) dictionary. + To use a Docker-style secrets convention, one may append `_FILE` to the environment variables with a path to a file + containing the intended value of the variable (e.g. `SQLALCHEMY_DATABASE_URI_FILE=/run/secrets/db_uri`). + Make sure to set the environment variable `SECRET_KEY` to a long, random + string (https://flask.palletsprojects.com/en/1.1.x/config/#SECRET_KEY) 2. Start docker container ``` @@ -53,8 +71,28 @@ This creates a volume called `pda-data` to persist the SQLite database with the You can then access PowerDNS-Admin by pointing your browser to http://localhost:9191. 
## Screenshots -![dashboard](https://user-images.githubusercontent.com/6447444/44068603-0d2d81f6-9fa5-11e8-83af-14e2ad79e370.png) -## LICENSE -MIT. See [LICENSE](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/LICENSE) +![dashboard](docs/screenshots/dashboard.png) + +## Support + +**Looking for help?** Try taking a look at the project's +[Support Guide](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/.github/SUPPORT.md) or joining +our [Discord Server](https://discord.powerdnsadmin.org). + +## Security Policy + +Please see our [Security Policy](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/SECURITY.md). + +## Contributing + +Please see our [Contribution Guide](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/docs/CONTRIBUTING.md). + +## Code of Conduct + +Please see our [Code of Conduct Policy](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/docs/CODE_OF_CONDUCT.md). + +## License +This project is released under the MIT license. For additional +information, [see the full license](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/LICENSE). diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..bd91d3626 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,31 @@ +# Security Policy + +## No Warranty + +Per the terms of the MIT license, PDA is offered "as is" and without any guarantee or warranty pertaining to its operation. While every reasonable effort is made by its maintainers to ensure the product remains free of security vulnerabilities, users are ultimately responsible for conducting their own evaluations of each software release. 
+ +## Recommendations + +Administrators are encouraged to adhere to industry best practices concerning the secure operation of software, such as: + +* Do not expose your PDA installation to the public Internet +* Do not permit multiple users to share an account +* Enforce minimum password complexity requirements for local accounts +* Prohibit access to your database from clients other than the PDA application +* Keep your deployment updated to the most recent stable release + +## Reporting a Suspected Vulnerability + +If you believe you've uncovered a security vulnerability and wish to report it confidentially, you may do so via email. Please note that any reported vulnerabilities **MUST** meet all the following conditions: + +* Affects the most recent stable release of PDA, or a current beta release +* Affects a PDA instance installed and configured per the official documentation +* Is reproducible following a prescribed set of instructions + +Please note that we **DO NOT** accept reports generated by automated tooling which merely suggest that a file or file(s) _may_ be vulnerable under certain conditions, as these are most often innocuous. + +If you believe that you've found a vulnerability which meets all of these conditions, please [submit a draft security advisory](https://github.com/PowerDNS-Admin/PowerDNS-Admin/security/advisories/new) on GitHub, or email a brief description of the suspected bug and instructions for reproduction to **admin@powerdnsadmin.org**. + +### Bug Bounties + +As PDA is provided as free open source software, we do not offer any monetary compensation for vulnerability or bug reports, however your contributions are greatly appreciated. 
\ No newline at end of file diff --git a/VERSION b/VERSION new file mode 100644 index 000000000..f7abe273d --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.4.2 \ No newline at end of file diff --git a/configs/development.py b/configs/development.py index 2c2e63d6c..be6cf3b51 100644 --- a/configs/development.py +++ b/configs/development.py @@ -7,7 +7,7 @@ SECRET_KEY = 'e951e5a1f4b94151b360f47edf596dd2' BIND_ADDRESS = '0.0.0.0' PORT = 9191 -OFFLINE_MODE = False +SERVER_EXTERNAL_SSL = os.getenv('SERVER_EXTERNAL_SSL', None) ### DATABASE CONFIG SQLA_DB_USER = 'pda' @@ -16,7 +16,19 @@ SQLA_DB_NAME = 'pda' SQLALCHEMY_TRACK_MODIFICATIONS = True +#CAPTCHA Config +CAPTCHA_ENABLE = True +CAPTCHA_LENGTH = 6 +CAPTCHA_WIDTH = 160 +CAPTCHA_HEIGHT = 60 +CAPTCHA_SESSION_KEY = 'captcha_image' + +#Server side sessions tracking +#Set to TRUE for CAPTCHA, or enable another stateful session tracking system +SESSION_TYPE = 'sqlalchemy' + ### DATABASE - MySQL +## Don't forget to uncomment the import in the top #SQLALCHEMY_DATABASE_URI = 'mysql://{}:{}@{}/{}'.format( # urllib.parse.quote_plus(SQLA_DB_USER), # urllib.parse.quote_plus(SQLA_DB_PASSWORD), @@ -24,6 +36,15 @@ # SQLA_DB_NAME #) +### DATABASE - PostgreSQL +## Don't forget to uncomment the import in the top +#SQLALCHEMY_DATABASE_URI = 'postgres://{}:{}@{}/{}'.format( +# urllib.parse.quote_plus(SQLA_DB_USER), +# urllib.parse.quote_plus(SQLA_DB_PASSWORD), +# SQLA_DB_HOST, +# SQLA_DB_NAME +#) + ### DATABASE - SQLite SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db') @@ -113,6 +134,14 @@ # ### the user is set as a non-administrator user. 
# #SAML_ATTRIBUTE_ADMIN = 'https://example.edu/pdns-admin' +## Attribute to get admin status for groups with the IdP +# ### Default: Don't set administrator group with SAML attributes +#SAML_GROUP_ADMIN_NAME = 'GroupName' + +## Attribute to get operator status for groups with the IdP +# ### Default: Don't set operator group with SAML attributes +#SAML_GROUP_OPERATOR_NAME = 'GroupName' + # ## Attribute to get account names from # ### Default: Don't control accounts with SAML attribute # ### If set, the user will be added and removed from accounts to match @@ -120,6 +149,16 @@ # ### be created and the user added to them. # SAML_ATTRIBUTE_ACCOUNT = 'https://example.edu/pdns-account' +# ## Attribute name that aggregates group names +# ### Default: Don't collect IdP groups from SAML group attributes +# ### In Okta, you can assign administrators by group using "Group Attribute Statements." +# ### In this case, the SAML_ATTRIBUTE_GROUP will be the attribute name for a collection of +# ### groups passed in the SAML assertion. From there, you can specify a SAML_GROUP_ADMIN_NAME. +# ### If the user is a member of this group, and that group name is included in the collection, +# ### the user will be set as an administrator. +# #SAML_ATTRIBUTE_GROUP = 'https://example.edu/pdns-groups' +# #SAML_GROUP_ADMIN_NAME = 'PowerDNSAdmin-Administrators' + # SAML_SP_ENTITY_ID = 'http://' # SAML_SP_CONTACT_NAME = '' # SAML_SP_CONTACT_MAIL = '' @@ -133,8 +172,8 @@ # CAUTION: For production use, usage of self-signed certificates it's highly discouraged. # Use certificates from trusted CA instead # ########################################################################################### -# SAML_CERT_FILE = '/etc/pki/powerdns-admin/cert.crt' -# SAML_CERT_KEY = '/etc/pki/powerdns-admin/key.pem' +# SAML_CERT = '/etc/pki/powerdns-admin/cert.crt' +# SAML_KEY = '/etc/pki/powerdns-admin/key.pem' # Configures if SAML tokens should be encrypted. 
# SAML_SIGN_REQUEST = False @@ -148,6 +187,10 @@ # #SAML_ASSERTION_ENCRYPTED = True +# Some IdPs, like Okta, do not return Attribute Statements by default +# Set the following to False if you are using Okta and not manually configuring Attribute Statements +# #SAML_WANT_ATTRIBUTE_STATEMENT = True + # Remote authentication settings # Whether to enable remote user authentication or not diff --git a/configs/docker_config.py b/configs/docker_config.py index 6666fc2ce..1668fc72c 100644 --- a/configs/docker_config.py +++ b/configs/docker_config.py @@ -1,115 +1,2 @@ -# Defaults for Docker image -BIND_ADDRESS = '0.0.0.0' PORT = 80 -SQLALCHEMY_DATABASE_URI = 'sqlite:////data/powerdns-admin.db' - -legal_envvars = ( - 'SECRET_KEY', - 'OIDC_OAUTH_API_URL', - 'OIDC_OAUTH_TOKEN_URL', - 'OIDC_OAUTH_AUTHORIZE_URL', - 'BIND_ADDRESS', - 'PORT', - 'LOG_LEVEL', - 'SALT', - 'SQLALCHEMY_TRACK_MODIFICATIONS', - 'SQLALCHEMY_DATABASE_URI', - 'MAIL_SERVER', - 'MAIL_PORT', - 'MAIL_DEBUG', - 'MAIL_USE_TLS', - 'MAIL_USE_SSL', - 'MAIL_USERNAME', - 'MAIL_PASSWORD', - 'MAIL_DEFAULT_SENDER', - 'SAML_ENABLED', - 'SAML_DEBUG', - 'SAML_PATH', - 'SAML_METADATA_URL', - 'SAML_METADATA_CACHE_LIFETIME', - 'SAML_IDP_SSO_BINDING', - 'SAML_IDP_ENTITY_ID', - 'SAML_NAMEID_FORMAT', - 'SAML_ATTRIBUTE_EMAIL', - 'SAML_ATTRIBUTE_GIVENNAME', - 'SAML_ATTRIBUTE_SURNAME', - 'SAML_ATTRIBUTE_NAME', - 'SAML_ATTRIBUTE_USERNAME', - 'SAML_ATTRIBUTE_ADMIN', - 'SAML_ATTRIBUTE_GROUP', - 'SAML_GROUP_ADMIN_NAME', - 'SAML_GROUP_TO_ACCOUNT_MAPPING', - 'SAML_ATTRIBUTE_ACCOUNT', - 'SAML_SP_ENTITY_ID', - 'SAML_SP_CONTACT_NAME', - 'SAML_SP_CONTACT_MAIL', - 'SAML_SIGN_REQUEST', - 'SAML_WANT_MESSAGE_SIGNED', - 'SAML_LOGOUT', - 'SAML_LOGOUT_URL', - 'SAML_ASSERTION_ENCRYPTED', - 'OFFLINE_MODE', - 'REMOTE_USER_LOGOUT_URL', - 'REMOTE_USER_COOKIES', - 'SIGNUP_ENABLED', - 'LOCAL_DB_ENABLED', - 'LDAP_ENABLED', - 'SAML_CERT', - 'SAML_KEY', - 'FILESYSTEM_SESSIONS_ENABLED' -) - -legal_envvars_int = ('PORT', 'MAIL_PORT', 
'SAML_METADATA_CACHE_LIFETIME') - -legal_envvars_bool = ( - 'SQLALCHEMY_TRACK_MODIFICATIONS', - 'HSTS_ENABLED', - 'MAIL_DEBUG', - 'MAIL_USE_TLS', - 'MAIL_USE_SSL', - 'SAML_ENABLED', - 'SAML_DEBUG', - 'SAML_SIGN_REQUEST', - 'SAML_WANT_MESSAGE_SIGNED', - 'SAML_LOGOUT', - 'SAML_ASSERTION_ENCRYPTED', - 'OFFLINE_MODE', - 'REMOTE_USER_ENABLED', - 'SIGNUP_ENABLED', - 'LOCAL_DB_ENABLED', - 'LDAP_ENABLED', - 'FILESYSTEM_SESSIONS_ENABLED' -) - -# import everything from environment variables -import os -import sys - - -def str2bool(v): - return v.lower() in ("true", "yes", "1") - - -for v in legal_envvars: - - ret = None - # _FILE suffix will allow to read value from file, usefull for Docker's - # secrets feature - if v + '_FILE' in os.environ: - if v in os.environ: - raise AttributeError( - "Both {} and {} are set but are exclusive.".format( - v, v + '_FILE')) - with open(os.environ[v + '_FILE']) as f: - ret = f.read() - f.close() - - elif v in os.environ: - ret = os.environ[v] - - if ret is not None: - if v in legal_envvars_bool: - ret = str2bool(ret) - if v in legal_envvars_int: - ret = int(ret) - sys.modules[__name__].__dict__[v] = ret +SQLALCHEMY_DATABASE_URI = 'sqlite:////data/powerdns-admin.db' \ No newline at end of file diff --git a/deploy/auto-setup/setup_linux.sh b/deploy/auto-setup/setup_linux.sh new file mode 100644 index 000000000..19cda5893 --- /dev/null +++ b/deploy/auto-setup/setup_linux.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Create a new group for PowerDNS-Admin +groupadd powerdnsadmin + +# Create a user for PowerDNS-Admin +useradd --system -g powerdnsadmin powerdnsadmin + +# Make the new user and group the owners of the PowerDNS-Admin files +chown -R powerdnsadmin:powerdnsadmin /opt/web/powerdns-admin + +# Start the PowerDNS-Admin service +systemctl start powerdns-admin + +# Enable the PowerDNS-Admin service to start automatically at boot +systemctl enable powerdns-admin diff --git a/deploy/auto-setup/setup_win.bat b/deploy/auto-setup/setup_win.bat new file 
mode 100644 index 000000000..a5ee9fd56 --- /dev/null +++ b/deploy/auto-setup/setup_win.bat @@ -0,0 +1,16 @@ +@echo off + +rem Create a new group for PowerDNS-Admin +net localgroup powerdnsadmin /add + +rem Create a user for PowerDNS-Admin +net user powerdnsadmin /add /passwordchg:no /homedir:nul /active:yes /expires:never /passwordreq:no /s + +rem Make the new user and group the owners of the PowerDNS-Admin files +icacls "C:\path\to\powerdns-admin" /setowner "powerdnsadmin" + +rem Start the PowerDNS-Admin service +net start powerdns-admin + +rem Enable the PowerDNS-Admin service to start automatically at boot +sc config powerdns-admin start= auto diff --git a/deploy/docker/portainer.yaml b/deploy/docker/portainer.yaml new file mode 100644 index 000000000..84402ef16 --- /dev/null +++ b/deploy/docker/portainer.yaml @@ -0,0 +1,15 @@ +version: '3.3' +services: + + core: + image: powerdnsadmin/pda-legacy:latest + restart: unless-stopped + environment: + - SECRET_KEY=INSECURE-CHANGE-ME-9I0DAtfkfj5JmBkPSaHah3ECAa8Df5KK + ports: + - "12000:9191" + volumes: + - "core_data:/data" + +volumes: + core_data: diff --git a/deploy/kubernetes/README.md b/deploy/kubernetes/README.md new file mode 100644 index 000000000..f0394b015 --- /dev/null +++ b/deploy/kubernetes/README.md @@ -0,0 +1,2 @@ +# Kubernetes +Example and simplified deployment for kubernetes. 
diff --git a/deploy/kubernetes/configmap.yml b/deploy/kubernetes/configmap.yml new file mode 100644 index 000000000..4270db254 --- /dev/null +++ b/deploy/kubernetes/configmap.yml @@ -0,0 +1,8 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: powerdnsadmin-env +data: + FLASK_APP: powerdnsadmin/__init__.py + SECRET_KEY: changeme_secret + SQLALCHEMY_DATABASE_URI: 'mysql://user:password@host/database' diff --git a/deploy/kubernetes/deployment.yml b/deploy/kubernetes/deployment.yml new file mode 100644 index 000000000..d3e4cfe96 --- /dev/null +++ b/deploy/kubernetes/deployment.yml @@ -0,0 +1,29 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: powerdnsadmin + labels: + app: powerdnsadmin +spec: + strategy: + type: RollingUpdate + replicas: 1 + selector: + matchLabels: + app: powerdnsadmin + template: + metadata: + labels: + app: powerdnsadmin + spec: + containers: + - name: powerdnsadmin + image: powerdnsadmin/pda-legacy + ports: + - containerPort: 80 + protocol: TCP + envFrom: + - configMapRef: + name: powerdnsadmin-env + imagePullPolicy: Always + restartPolicy: Always \ No newline at end of file diff --git a/deploy/kubernetes/service.yml b/deploy/kubernetes/service.yml new file mode 100644 index 000000000..813b37b60 --- /dev/null +++ b/deploy/kubernetes/service.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: powerdnsadmin + namespace: powerdnsadmin + labels: + app: powerdnsadmin +spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + app: powerdnsadmin + diff --git a/docker-compose-test.yml b/docker-compose-test.yml index 7dcf4a027..77c8dba22 100644 --- a/docker-compose-test.yml +++ b/docker-compose-test.yml @@ -1,11 +1,11 @@ -version: "2.1" +version: "3.8" services: powerdns-admin: + image: powerdns-admin-test build: context: . 
dockerfile: docker-test/Dockerfile - image: powerdns-admin-test container_name: powerdns-admin-test ports: - "9191:80" @@ -17,10 +17,10 @@ services: - pdns-server pdns-server: + image: pdns-server-test build: context: . dockerfile: docker-test/Dockerfile.pdns - image: pdns-server-test ports: - "5053:53" - "5053:53/udp" diff --git a/docker-compose.yml b/docker-compose.yml index e18d68376..74ff185f1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: "3" services: app: - image: ngoduykhanh/powerdns-admin:latest + image: powerdnsadmin/pda-legacy:latest container_name: powerdns_admin ports: - "9191:80" @@ -15,4 +15,3 @@ services: - GUNICORN_TIMEOUT=60 - GUNICORN_WORKERS=2 - GUNICORN_LOGLEVEL=DEBUG - - OFFLINE_MODE=False # True for offline, False for external resources diff --git a/docker-test/Dockerfile b/docker-test/Dockerfile index 577e12004..7191825e6 100644 --- a/docker-test/Dockerfile +++ b/docker-test/Dockerfile @@ -1,15 +1,36 @@ -FROM debian:stretch-slim +FROM debian:bullseye-slim LABEL maintainer="k@ndk.name" ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 RUN apt-get update -y \ - && apt-get install -y --no-install-recommends apt-transport-https locales locales-all python3-pip python3-setuptools python3-dev curl libsasl2-dev libldap2-dev libssl-dev libxml2-dev libxslt1-dev libxmlsec1-dev libffi-dev build-essential libmariadb-dev-compat \ - && curl -sL https://deb.nodesource.com/setup_10.x | bash - \ + && apt-get install -y --no-install-recommends \ + apt-transport-https \ + curl \ + build-essential \ + libffi-dev \ + libldap2-dev \ + libmariadb-dev-compat \ + libpq-dev \ + libsasl2-dev \ + libssl-dev \ + libxml2-dev \ + libxmlsec1-dev \ + libxmlsec1-openssl \ + libxslt1-dev \ + locales \ + locales-all \ + pkg-config \ + python3-dev \ + python3-pip \ + python3-setuptools \ + && curl -sL https://deb.nodesource.com/setup_lts.x | bash - \ && curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \ && echo "deb 
https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list \ && apt-get update -y \ - && apt-get install -y nodejs yarn \ + && apt-get install -y --no-install-recommends \ + nodejs \ + yarn \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* @@ -21,8 +42,6 @@ RUN pip3 install --upgrade pip RUN pip3 install -r requirements.txt COPY . /app -COPY ./docker/entrypoint.sh /usr/local/bin/ -RUN chmod +x /usr/local/bin/entrypoint.sh ENV FLASK_APP=powerdnsadmin/__init__.py RUN yarn install --pure-lockfile --production \ @@ -31,4 +50,4 @@ RUN yarn install --pure-lockfile --production \ COPY ./docker-test/wait-for-pdns.sh /opt RUN chmod u+x /opt/wait-for-pdns.sh -CMD ["/opt/wait-for-pdns.sh", "/usr/local/bin/pytest","--capture=no","-vv"] +CMD ["/opt/wait-for-pdns.sh", "/usr/local/bin/pytest", "-W", "ignore::DeprecationWarning", "--capture=no", "-vv"] diff --git a/docker-test/start.sh b/docker-test/start.sh index 9a6601764..efd1c0e25 100644 --- a/docker-test/start.sh +++ b/docker-test/start.sh @@ -10,9 +10,9 @@ fi # Import schema structure if [ -e "/data/pdns.sql" ]; then - rm /data/pdns.db + rm -f /data/pdns.db cat /data/pdns.sql | sqlite3 /data/pdns.db - rm /data/pdns.sql + rm -f /data/pdns.sql echo "Imported schema structure" fi diff --git a/docker/Dockerfile b/docker/Dockerfile index 5296e0238..519e0775f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,13 +1,14 @@ -FROM alpine:3.13 AS builder -LABEL maintainer="k@ndk.name" +FROM alpine:3.17 AS builder ARG BUILD_DEPENDENCIES="build-base \ libffi-dev \ + libpq-dev \ libxml2-dev \ mariadb-connector-c-dev \ openldap-dev \ python3-dev \ xmlsec-dev \ + npm \ yarn \ cargo" @@ -30,7 +31,7 @@ COPY ./requirements.txt /build/requirements.txt # Get application dependencies RUN pip install --upgrade pip && \ - pip install -r requirements.txt + pip install --use-pep517 -r requirements.txt # Add sources COPY . /build @@ -38,7 +39,7 @@ COPY . 
/build # Prepare assets RUN yarn install --pure-lockfile --production && \ yarn cache clean && \ - sed -i -r -e "s|'cssmin',\s?'cssrewrite'|'cssmin'|g" /build/powerdnsadmin/assets.py && \ + sed -i -r -e "s|'rcssmin',\s?'cssrewrite'|'rcssmin'|g" /build/powerdnsadmin/assets.py && \ flask assets build RUN mv /build/powerdnsadmin/static /tmp/static && \ @@ -46,6 +47,7 @@ RUN mv /build/powerdnsadmin/static /tmp/static && \ cp -r /tmp/static/generated /build/powerdnsadmin/static && \ cp -r /tmp/static/assets /build/powerdnsadmin/static && \ cp -r /tmp/static/img /build/powerdnsadmin/static && \ + find /tmp/static/node_modules -name 'webfonts' -exec cp -r {} /build/powerdnsadmin/static \; && \ find /tmp/static/node_modules -name 'fonts' -exec cp -r {} /build/powerdnsadmin/static \; && \ find /tmp/static/node_modules/icheck/skins/square -name '*.png' -exec cp {} /build/powerdnsadmin/static/generated \; @@ -66,12 +68,12 @@ RUN mkdir -p /app && \ cp -r /build/configs/docker_config.py /app/configs # Build image -FROM alpine:3.13 +FROM alpine:3.17 ENV FLASK_APP=/app/powerdnsadmin/__init__.py \ USER=pda -RUN apk add --no-cache mariadb-connector-c postgresql-client py3-gunicorn py3-psycopg2 xmlsec tzdata libcap && \ +RUN apk add --no-cache mariadb-connector-c postgresql-client py3-gunicorn py3-pyldap py3-flask py3-psycopg2 xmlsec tzdata libcap && \ addgroup -S ${USER} && \ adduser -S -D -G ${USER} ${USER} && \ mkdir /data && \ @@ -80,7 +82,7 @@ RUN apk add --no-cache mariadb-connector-c postgresql-client py3-gunicorn py3-ps apk del libcap COPY --from=builder /usr/bin/flask /usr/bin/ -COPY --from=builder /usr/lib/python3.8/site-packages /usr/lib/python3.8/site-packages/ +COPY --from=builder /usr/lib/python3.10/site-packages /usr/lib/python3.10/site-packages/ COPY --from=builder --chown=root:${USER} /app /app/ COPY ./docker/entrypoint.sh /usr/bin/ @@ -90,6 +92,6 @@ RUN chown ${USER}:${USER} ./configs /app && \ EXPOSE 80/tcp USER ${USER} -HEALTHCHECK CMD 
["wget","--output-document=-","--quiet","--tries=1","http://127.0.0.1/"] +HEALTHCHECK --interval=5s --timeout=5s --start-period=20s --retries=5 CMD wget --output-document=- --quiet --tries=1 http://127.0.0.1${SCRIPT_NAME:-/} ENTRYPOINT ["entrypoint.sh"] CMD ["gunicorn","powerdnsadmin:create_app()"] diff --git a/docs/API.md b/docs/API.md index d7e3732e0..7514d5c80 100644 --- a/docs/API.md +++ b/docs/API.md @@ -11,6 +11,8 @@ #### Accessing the API +PDA has its own API, that should not be confused with the PowerDNS API. Keep in mind that you have to enable PowerDNS API with a key that will be used by PDA to manage it. Therefore, you should use PDA created keys to browse PDA's API, on PDA's adress and port. They don't grant access to PowerDNS' API. + The PDA API consists of two distinct parts: - The /powerdnsadmin endpoints manages PDA content (accounts, users, apikeys) and also allow domain creation/deletion diff --git a/docs/CODE_OF_CONDUCT.md b/docs/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..ed3cb4741 --- /dev/null +++ b/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at [admin@powerdnsadmin.org](mailto:admin@powerdnsadmin.org). All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md new file mode 100644 index 000000000..2a978dc6c --- /dev/null +++ b/docs/CONTRIBUTING.md @@ -0,0 +1,107 @@ +# Contribution Guide + +**Looking for help?** Try taking a look at the project's +[Support Guide](https://github.com/PowerDNS-Admin/PowerDNS-Admin/blob/master/.github/SUPPORT.md) or joining +our [Discord Server](https://discord.powerdnsadmin.org). + +
+

+ :bug: Report a bug · + :bulb: Suggest a feature · + :arrow_heading_up: Submit a pull request +

+

+ :rescue_worker_helmet: Become a maintainer · + :heart: Other ideas +

+
+

+ +Some general tips for engaging here on GitHub: + +* Register for a free [GitHub account](https://github.com/signup) if you haven't already. +* You can use [GitHub Markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) for formatting text and adding images. +* To help mitigate notification spam, please avoid "bumping" issues with no activity. (To vote an issue up or down, use a :thumbsup: or :thumbsdown: reaction.) +* Please avoid pinging members with `@` unless they've previously expressed interest or involvement with that particular issue. + +## [Project Update - PLEASE READ!!!](https://github.com/PowerDNS-Admin/PowerDNS-Admin/discussions/1708) + +## :bug: Reporting Bugs + +* First, ensure that you're running the [latest stable version](https://github.com/PowerDNS-Admin/PowerDNS-Admin/releases) of PDA. If you're running an older version, there's a chance that the bug has already been fixed. + +* Next, search our [issues list](https://github.com/PowerDNS-Admin/PowerDNS-Admin/issues?q=is%3Aissue) to see if the bug you've found has already been reported. If you come across a bug report that seems to match, please click "add a reaction" in the top right corner of the issue and add a thumbs up (:thumbsup:). This will help draw more attention to it. Any comments you can add to provide additional information or context would also be much appreciated. + +* If you can't find any existing issues (open or closed) that seem to match yours, you're welcome to [submit a new bug report](https://github.com/PowerDNS-Admin/PowerDNS-Admin/issues/new/choose). Be sure to complete the entire report template, including detailed steps that someone triaging your issue can follow to confirm the reported behavior. (If we're not able to replicate the bug based on the information provided, we'll ask for additional detail.) 
+ +* Some other tips to keep in mind: + * Error messages and screenshots are especially helpful. + * Don't prepend your issue title with a label like `[Bug]`; the proper label will be assigned automatically. + * Verify that you have GitHub notifications enabled and are subscribed to your issue after submitting. + * We appreciate your patience as bugs are prioritized by their severity, impact, and difficulty to resolve. + +## :bulb: Feature Requests + +* First, check the GitHub [issues list](https://github.com/PowerDNS-Admin/PowerDNS-Admin/issues?q=is%3Aissue) to see if the feature you have in mind has already been proposed. If you happen to find an open feature request that matches your idea, click "add a reaction" in the top right corner of the issue and add a thumbs up (:thumbsup:). This ensures that the issue has a better chance of receiving attention. Also feel free to add a comment with any additional justification for the feature. + +* If you have a rough idea that's not quite ready for formal submission yet, start a [GitHub discussion](https://github.com/PowerDNS-Admin/PowerDNS-Admin/discussions) instead. This is a great way to test the viability and narrow down the scope of a new feature prior to submitting a formal proposal, and can serve to generate interest in your idea from other community members. + +* Once you're ready, submit a feature request [using this template](https://github.com/PowerDNS-Admin/PowerDNS-Admin/issues/choose). Be sure to provide sufficient context and detail to convey exactly what you're proposing and why. The stronger your use case, the better chance your proposal has of being accepted. + +* Some other tips to keep in mind: + * Don't prepend your issue title with a label like `[Feature]`; the proper label will be assigned automatically. + * Try to anticipate any likely questions about your proposal and provide that information proactively. 
+ * Verify that you have GitHub notifications enabled and are subscribed to your issue after submitting. + * You're welcome to volunteer to implement your FR, but don't submit a pull request until it has been approved. + +## :arrow_heading_up: Submitting Pull Requests + +* [Pull requests](https://docs.github.com/en/pull-requests) (a feature of GitHub) are used to propose changes to PDA's code base. Our process generally goes like this: + * A user opens a new issue (bug report or feature request) + * A maintainer triages the issue and may mark it as needing an owner + * The issue's author can volunteer to own it, or someone else can + * A maintainer assigns the issue to whomever volunteers + * The issue owner submits a pull request that will resolve the issue + * A maintainer reviews and merges the pull request, closing the issue + +* It's very important that you not submit a pull request until a relevant issue has been opened **and** assigned to you. Otherwise, you risk wasting time on work that may ultimately not be needed. + +* New pull requests should generally be based off of the `dev` branch, rather than `master`. The `dev` branch is used for ongoing development, while `master` is used for tracking stable releases. + +* In most cases, it is not necessary to add a changelog entry: A maintainer will take care of this when the PR is merged. (This helps avoid merge conflicts resulting from multiple PRs being submitted simultaneously.) + +* All code submissions should meet the following criteria (CI will eventually enforce these checks): + * Python syntax is valid + * PEP 8 compliance is enforced, with the exception that lines may be + greater than 80 characters in length + +* Some other tips to keep in mind: + * If you'd like to volunteer for someone else's issue, please post a comment on that issue letting us know. (This will allow the maintainers to assign it to you.) + * All new functionality must include relevant tests where applicable. 
+ +## :rescue_worker_helmet: Become a Maintainer + +We're always looking for motivated individuals to join the maintainers team and help drive PDA's long-term development. Some of our most sought-after skills include: + +* Python development with a strong focus on the [Flask](https://flask.palletsprojects.com/) and [Django](https://www.djangoproject.com/) frameworks +* Expertise working with SQLite, MySQL, and/or PostgreSQL databases +* Javascript & TypeScript proficiency +* A knack for web application design (HTML & CSS) +* Familiarity with git and software development best practices +* Excellent attention to detail +* Working experience in the field of network operations as it relates to the use of DNS (Domain Name System) servers. + +We generally ask that maintainers dedicate around four hours of work to the project each week on average, which includes both hands-on development and project management tasks such as issue triage. + +We do maintain an active Mattermost instance for internal communication, but we also use GitHub issues for project management. + +Some maintainers petition their employer to grant some of their paid time to work on PDA. + +Interested? You can contact our lead maintainer, Matt Scott, at admin@powerdnsadmin.org. We'd love to have you on the team! + +## :heart: Other Ways to Contribute + +You don't have to be a developer to contribute to PDA: There are plenty of other ways you can add value to the community! Below are just a few examples: + +* Help answer questions and provide feedback in our [GitHub discussions](https://github.com/PowerDNS-Admin/PowerDNS-Admin/discussions). +* Write a blog article or record a YouTube video demonstrating how PDA is used at your organization. 
diff --git a/docs/announcements/project-update-2022-12-09.md b/docs/announcements/project-update-2022-12-09.md new file mode 100644 index 000000000..427751bb0 --- /dev/null +++ b/docs/announcements/project-update-2022-12-09.md @@ -0,0 +1,100 @@ +# PDA Project Update + +## Introduction + +Hello PDA community members, + +My name is Matt Scott, and I am the owner of [Azorian Solutions](https://azorian.solutions), a consultancy for the +Internet Service Provider (ISP) industry. I'm pleased to announce that I have taken ownership of the PDA project and +will be taking over the lead maintainer role, effective immediately. + +Please always remember and thank both [Khanh Ngo](https://github.com/ngoduykhanh) and +[Jérôme Becot](https://github.com/jbe-dw) for their efforts in keeping this project alive thus far. Without the effort +of Khanh creating the PDA project and community, and the efforts of Jérôme for holding up the lead maintainer role after +Khanh had to step down, this project would not still be alive today. + +With that being said, please read through all the following announcements as they are important if you're an active PDA +user or community member. I intend to make many great enhancements to the project, but it could be a bumpy road ahead. + +### Project Maintenance + +As it stands today, contributions to the project are at a low. At this point, there is a rather large backlog of issues +and feature requests in contrast to the current maintenance capacities. This is not to say you should lose hope though! +As part of this project transition, some additional contribution interest has been generated and I expect to attract +more with the changes I'm planning to make. In the near future, I may by-pass some usual maintenance processes in order +to expedite some changes to the project that have been outstanding for some time. 
+ +This is to say however that unless the project attracts a healthy new contribution base, issues may continue to pile up +as maintenance capacity is rather limited. This is further complicated by the fact that the current code base is harder +to follow naturally since it largely lacks uniformity and standards. This lack of uniformity has lead to a difficult +situation that makes implementing certain changes less effective. This status quo is not uncommon with projects born how +PDA was born, so it's unfortunate but not unexpected. + +### Change of Direction + +In order to reorganize the project and get it on a track to a future that allows it to contend with other commercial +quality products, I had to make many considerations to the proficiencies of two unique paths forward to achieve this +goal. One path forward is seemingly obvious, continue maintaining the current code base while overhauling it to shift it +towards the envisioned goal. The other path is a fresh solution design with a complete rebuild. + +The answer to the aforementioned decision might seem obvious to those of you who typically favor the "don't reinvent the +wheel" mentality. I'm unclear of the details surrounding the original use-case that drove the development of this +project, but I don't believe it was on-par with some use-cases we see today which include operators handling many tens +of thousands of zones and/or records. There are many changes that have been (sometimes) haphazardly implemented which +has lead to the previously mentioned lack of uniformity among other issues. To put it simply, I'm not sure if the +project ever had a grand vision per se but instead was mostly reactionary to community requests. + +I believe that the current project has served the community fairly well from what I can tell. I know the product has +certainly helped me in my professional efforts with many environments. 
I also believe that it's time to pivot so that +the project can realize it's true potential, considering the existing user base. For this reason, I am beginning the +planning phase of a project overhaul. This effort will involve a complete re-engineering of the project's contribution +standards and requirements, technology stack, and project structure. + +This was not an easy decision to come to but one must appreciate that there aren't as many people that can get very +excited about working on the current project code base. The current project has many barriers to entry which I intend to +drastically impact with future changes. The reality is that it's easier to gain contribution participation with a new +build effort as it offers an opportunity to own a part of the project with impactful contributions. + +### Project Enhancements + +Since this is the beginning of a rebirth of the project so to speak, I want to implement a new operational tactic that +will hopefully drive contributions through incentive. Many of us understand that any project, needs a leader to stay on +track and organized. If everything were a democratic process, it would take too long and suffer unnecessary challenges. +With that being said, I do believe that there is plenty of opportunity through-out various development phases of the +project to allow for a democratic process where the community contributors and members can participate in the +decision-making. + +The plan to achieve the aforementioned democratic goal is to centralize communications and define some basic structured +processes. To do this, more effective methods of communication have been implemented to allow those interested in +contributing to easily participate in fluid, open communication. This has already been proving to be quite effective for +exchanging ideas and visions while addressing the issue with contributors living in vastly different time zones. 
This is +effectively a private chat hosted by the PDA project using Mattermost (a Slack-like alternative). + +Even if you aren't in a position to directly contribute work to the project, you can still contribute by participating +in these very important and early discussions that will impact the solution engineering. If the PDA project is an +important tool in your organization, I encourage you to join the conversation and contribute where applicable your +use-cases. Having more insight on the community use-cases will only benefit the future of this project. + +If you're interested in joining the conversation, please email me at +[admin@powerdnsadmin.org](mailto:admin@powerdnsadmin.org) for an invitation. + +### Re-branding + +As part of this project transition, I will also be changing the naming scheme in order to support the future development +efforts toward a newly engineered solution. The current PDA project will ultimately become known as the "PDA Legacy" +application. This change will help facilitate the long-term solution to take the branding position of the existing +solution. Another effort I will be making is to get an app landing page online at the project's new domain: +[powerdnsadmin.org](https://powerdnsadmin.org). This will act as one more point of online exposure for the project which +will hopefully lend itself well to attracting additional community members. + +### Contribution Requirements + +Another big change that will be made with the new project, will be well-defined contribution requirements. I realize +these requirements can be demotivating for some, but they are a necessary evil to ensure the project actually achieves +its goals effectively. It's important to always remember that strict requirements are to everyone's benefit as they push +for order where chaos is quite destructive. + +### Closing + +I hope these announcements garner more participation in the PDA community. 
The project definitely needs more help to
+achieve any goal at this point, so your participation is valued! diff --git a/docs/announcements/project-update-2023-11-25.md b/docs/announcements/project-update-2023-11-25.md new file mode 100644 index 000000000..5e1719f34 --- /dev/null +++ b/docs/announcements/project-update-2023-11-25.md @@ -0,0 +1,109 @@ +# PDA Project Update
+
+## Introduction
+
+Hello PDA community members,
+
+I know it has been quite a while since the last formal announcement like this. Things have been quite busy and difficult
+for me both professionally and personally. While I try hard to never make my problems someone else's problems, I do
+believe it's important to be transparent with the community. I'm not going to go into details, but I will say that I
+have been dealing with some mental health issues that have been quite challenging. I'm not one to give up though,
+so I'm pushing through and trying to get back on track.
+
+With that being said, let's jump into the announcements.
+
+### Project Maintenance
+
+Granted I haven't been nearly as active on the project as I would like to be, I have been keeping an eye on things and
+trying to keep up with the maintenance. I know there are a lot of issues and feature requests that have been piling up,
+and I'm sorry for that. Even if I had been more active in recent months, it would not have changed the true root cause
+of the issue.
+
+This project was started out of a need for an individual's own use-case. I don't believe it was ever intended to be a
+commercial quality product nor a community project. It did however gain traction quickly and the community grew. This
+is a great thing, but it also comes with some challenges. The biggest challenge is that the project was never designed
+to be a community project. This means that the project lacks many of the things that are required to effectively manage
+a community project. 
This is not to say that the project is doomed, but many of the fast-paced changes combined with
+the lack of standards has led to a difficult situation that makes implementing certain changes incredibly unproductive
+and quite often, entirely counter-productive.
+
+After many years of accepting contributions from those who are not professional developers, the project has become quite
+difficult to maintain. This is not to say that I don't appreciate the contributions, but it's important to understand
+that the state of the code-base for the project is not in a good place. This is not uncommon with projects born how PDA
+was born, so it's unfortunate but not unexpected.
+
+As of today, there are so many dependencies and a large amount of very poorly implemented features that it's difficult
+to make any changes without breaking many other pieces. This is further complicated by the fact that the current code
+base is harder to follow naturally since it largely lacks uniformity and standards. This lack of uniformity has led to
+a situation where automated regression testing is not possible. This is a very important aspect of any project that
+expects to be able to make changes without breaking things. This is also a very important aspect of any project that
+expects to be able to accept contributions from the community with minimum management resources.
+
+The hard reality is that the majority of stakeholders in the project are not professional developers. This naturally
+means the number of people that can offer quality contributions is very limited. This problem is further aggravated by
+the poor quality feature implementation which is very hard to follow, even for seasoned developers like myself. So many
+seemingly small issues that have been reported have led to finding that the resolution is not as simple as it seems.
+
+### New Direction
+
+As I previously stated in my last formal announcement, we would be working towards a total replacement of the project. 
+Unfortunately, this is not a simple task, and it's not something that can be done quickly. Furthermore, with +increasingly limited capacity in our own lives to work on this, we are essentially drowning in a sea of technical debt +created by the past decisions of the project to accept all contributions. We have essentially reached a point where +far too much time and resources are being wasted just to attempt to meet the current demand of requests on the current +edition of PDA. This is a tragedy because the efforts that are invested into the current edition, really aren't +creating true progress for the project, but instead merely delaying the inevitable. + +As I have stated before to many community members, one aspect of taking over management of this project to ultimately +save it and keep it alive, would involve making hard decisions that many will not agree with. It's unfortunate that +many of those who are less than supportive of these decisions, often lack the appropriate experience to understand the +importance of these decisions. I'm not saying that I'm always right, but I am saying that it's not hard to see where +this is headed without some drastic changes. + +With all of that being said, it's time for me to make some hard decisions. I have decided that the best course of +action is to stop accepting contributions to the current edition of PDA. At this point, due to the aforementioned +issues that lead to breaking the application with seemingly simple changes, it's just not worth the effort to try to +keep up with the current edition. This is not to say that I'm giving up on the project, but instead I'm going to +re-focus my efforts on the new edition of PDA. This is the only way to ensure that the project will survive and +hopefully thrive in the future. + +I will not abandon the current set of updates that were planned for the next release of `0.4.2` however. I have +re-scheduled that release to be out by the end of the year. 
This will be the last release of the current edition of +PDA. The consensus from some users is that the current edition is stable enough to be used in production environments. +I don't necessarily agree with that, but I do believe that it's stable enough to be used in production +environments with the understanding that it's not a commercial quality product. + +### Future Contributions + +For those of you wondering about contributions to the new edition of PDA, the answer for now is simple. I won't be +accepting any contributions to the new edition until I can achieve a stable release that delivers the core features of +the current edition. This is not to say that I won't be accepting any contributions at all, but instead that I will be +very selective about what contributions I accept. I believe this is the only way to ensure that a solid foundation not +only takes shape, but remains solid. + +It is well understood that many developers have their own ways of doing things, but it's important to understand +that this project is not a personal project. This project is a community project and therefore must be treated as such. +This means that the project must be engineered in a way that allows for the community to participate in the development +process. This is not possible if the project is not engineered in a way that is easy to follow and understand. + +### Project Enhancements + +It should be understood that one of the greatest benefits of this pivot is that it will allow for a more structured +development process. As a result of that, the project could potentially see a future where it adopts a whole new set of +features that weren't previously imagined. One prime example of this could be integration with registrar APIs. This +could make easy work of tasks such as DNSSEC key rotation, which is currently a very manual process. 
+ +I am still working on final project requirements for additional phases of the new PDA edition, but these additions +won't receive any attention until the core features are implemented. I will be sure to make announcements as these +requirements are finalized. It is my intention to follow a request for proposal (RFP) process for these additional +features. This will allow the community to participate in the decision-making process for future expansion of the +project. + +### Closing + +I hope that by the time you have reached this point in the announcement, that I have elicited new hope for the +long-term future of the project. I know that many of you have been waiting for a long time for some of the features that have been +requested. I know that many of you have been waiting for a long time for some of the issues to be resolved, for +requested features to be implemented, and for the project to be more stable. It's unfortunate that it has taken this +long to get to this point, but this is the nature of life itself. I hope that you can understand that this is the only +reasonable gamble that the project survives and thrives in the future. diff --git a/docs/oauth.md b/docs/oauth.md index f84ac6999..b493f8762 100644 --- a/docs/oauth.md +++ b/docs/oauth.md @@ -51,6 +51,7 @@ Enable OpenID Connect OAuth option. * API URL, /auth (The ending can be different with each provider) * Token URL, /token * Authorize URL, /auth +* Metadata URL, /.well-known/openid-configuration * Logout URL, /logout * Username, This will be the claim that will be used as the username. 
(Usually preferred_username) diff --git a/docs/screenshots/dashboard.png b/docs/screenshots/dashboard.png new file mode 100644 index 000000000..e9d6ff105 Binary files /dev/null and b/docs/screenshots/dashboard.png differ diff --git a/docs/wiki/README.md b/docs/wiki/README.md new file mode 100644 index 000000000..65165c23e --- /dev/null +++ b/docs/wiki/README.md @@ -0,0 +1,50 @@ +# PowerDNS-Admin wiki + +## Database Setup guides + +- [MySQL / MariaDB](database-setup/Setup-MySQL-or-MariaDB.md) +- [PostgreSQL](database-setup/Setup-PostgreSQL.md) + +## Installation guides + +- [General (Read this first)](install/General.md) + - BSD: + - [Install on FreeBSD 12.1-RELEASE](install/Running-on-FreeBSD.md) + - Containers: + - [Install on Docker](install/Running-PowerDNS-Admin-on-Docker.md) + - Debian: + - [Install on Ubuntu or Debian](install/Running-PowerDNS-Admin-on-Ubuntu-or-Debian.md) + - Red-Hat: + - [Install on Centos 7](install/Running-PowerDNS-Admin-on-Centos-7.md) + - [Install on Fedora 23](install/Running-PowerDNS-Admin-on-Fedora-23.md) + - [Install on Fedora 30](install/Running-PowerDNS-Admin-on-Fedora-30.md) + +### Post install Setup + +- [Environment Variables](configuration/Environment-variables.md) +- [Getting started](configuration/Getting-started.md) +- SystemD: + - [Running PowerDNS-Admin as a service using Systemd](install/Running-PowerDNS-Admin-as-a-service-(Systemd).md) + +### Web Server configuration + +- [Supervisord](web-server/Supervisord-example.md) +- [Systemd](web-server/Systemd-example.md) +- [Systemd + Gunicorn + Nginx](web-server/Running-PowerDNS-Admin-with-Systemd-Gunicorn-and-Nginx.md) +- [Systemd + Gunicorn + Apache](web-server/Running-PowerDNS-Admin-with-Systemd,-Gunicorn-and-Apache.md) +- [uWSGI](web-server/uWSGI-example.md) +- [WSGI-Apache](web-server/WSGI-Apache-example.md) +- [Docker-ApacheReverseProxy](web-server/Running-Docker-Apache-Reverseproxy.md) + +## Using PowerDNS-Admin + +- Setting up a zone +- Adding a record + +## Feature 
usage + +- [DynDNS2](features/DynDNS2.md) + +## Debugging + +- [Debugging the build process](debug/build-process.md) diff --git a/docs/wiki/configuration/Configure-Active-Directory-Authentication-using-Group-Security.md b/docs/wiki/configuration/Configure-Active-Directory-Authentication-using-Group-Security.md new file mode 100644 index 000000000..f6c032f56 --- /dev/null +++ b/docs/wiki/configuration/Configure-Active-Directory-Authentication-using-Group-Security.md @@ -0,0 +1,34 @@ +Active Directory Setup - Tested with Windows Server 2012 + +1) Login as an admin to PowerDNS Admin + +2) Go to Settings --> Authentication + +3) Under Authentication, select LDAP + +4) Click the Radio Button for Active Directory + +5) Fill in the required info - + +* LDAP URI - ldap://ip.of.your.domain.controller:389 +* LDAP Base DN - dc=yourdomain,dc=com +* Active Directory domain - yourdomain.com +* Basic filter - (objectCategory=person) + * the brackets here are **very important** +* Username field - sAMAccountName +* GROUP SECURITY - Status - On +* Admin group - CN=Your_AD_Admin_Group,OU=Your_AD_OU,DC=yourdomain,DC=com +* Operator group - CN=Your_AD_Operator_Group,OU=Your_AD_OU,DC=yourdomain,DC=com +* User group - CN=Your_AD_User_Group,OU=Your_AD_OU,DC=yourdomain,DC=com + +6) Click Save + +7) Logout and re-login as an LDAP user from each of the above groups. + +If you're having problems getting the correct information for your groups, the following tool can be useful - + +https://docs.microsoft.com/en-us/sysinternals/downloads/adexplorer + +In our testing, groups with spaces in the name did not work, we had to create groups with underscores to get everything operational. 
+ +YMMV diff --git a/docs/wiki/configuration/Environment-variables.md b/docs/wiki/configuration/Environment-variables.md new file mode 100644 index 000000000..7f42727ec --- /dev/null +++ b/docs/wiki/configuration/Environment-variables.md @@ -0,0 +1,65 @@ +# Supported environment variables + +| Variable | Description | Required | Default value | +|--------------------------------|--------------------------------------------------------------------------|------------|---------------| +| BIND_ADDRESS | +| CSRF_COOKIE_SECURE | +| SESSION_TYPE | null | filesystem | sqlalchemy | | filesystem | +| LDAP_ENABLED | +| LOCAL_DB_ENABLED | +| LOG_LEVEL | +| MAIL_DEBUG | +| MAIL_DEFAULT_SENDER | +| MAIL_PASSWORD | +| MAIL_PORT | +| MAIL_SERVER | +| MAIL_USERNAME | +| MAIL_USE_SSL | +| MAIL_USE_TLS | +| OFFLINE_MODE | +| OIDC_OAUTH_API_URL | | | | +| OIDC_OAUTH_AUTHORIZE_URL | +| OIDC_OAUTH_TOKEN_URL | | | | +| OIDC_OAUTH_METADATA_URL | | | | +| PORT | +| SERVER_EXTERNAL_SSL | Forceful override of URL schema detection when using the url_for method. 
| False | None | +| REMOTE_USER_COOKIES | +| REMOTE_USER_LOGOUT_URL | +| SALT | +| SAML_ASSERTION_ENCRYPTED | +| SAML_ATTRIBUTE_ACCOUNT | +| SAML_ATTRIBUTE_ADMIN | +| SAML_ATTRIBUTE_EMAIL | +| SAML_ATTRIBUTE_GIVENNAME | +| SAML_ATTRIBUTE_GROUP | +| SAML_ATTRIBUTE_NAME | +| SAML_ATTRIBUTE_SURNAME | +| SAML_ATTRIBUTE_USERNAME | +| SAML_CERT | +| SAML_DEBUG | +| SAML_ENABLED | +| SAML_GROUP_ADMIN_NAME | +| SAML_GROUP_TO_ACCOUNT_MAPPING | +| SAML_IDP_SSO_BINDING | +| SAML_IDP_ENTITY_ID | +| SAML_KEY | +| SAML_LOGOUT | +| SAML_LOGOUT_URL | +| SAML_METADATA_CACHE_LIFETIME | +| SAML_METADATA_URL | +| SAML_NAMEID_FORMAT | +| SAML_PATH | +| SAML_SIGN_REQUEST | +| SAML_SP_CONTACT_MAIL | +| SAML_SP_CONTACT_NAME | +| SAML_SP_ENTITY_ID | +| SAML_WANT_MESSAGE_SIGNED | +| SECRET_KEY | Flask secret key [^1] | Y | no default | +| SESSION_COOKIE_SECURE | +| SIGNUP_ENABLED | +| SQLALCHEMY_DATABASE_URI | SQL Alchemy URI to connect to database | N | no default | +| SQLALCHEMY_TRACK_MODIFICATIONS | +| SQLALCHEMY_ENGINE_OPTIONS | json string. e.g. '{"pool_recycle":600,"echo":1}' [^2] | + +[^1]: Flask secret key (see https://flask.palletsprojects.com/en/1.1.x/config/#SECRET_KEY for how to generate) +[^2]: See Flask-SQLAlchemy Documentation for all engine options. 
diff --git a/docs/wiki/configuration/Getting-started.md b/docs/wiki/configuration/Getting-started.md new file mode 100644 index 000000000..ce722348c --- /dev/null +++ b/docs/wiki/configuration/Getting-started.md @@ -0,0 +1,16 @@ +# Getting started with PowerDNS-Admin
+
+
+In your FLASK_CONF (check the installation directions for where yours is) file, make sure you have the database URI filled in (in some previous documentation this was called config.py):
+
+For MySQL / MariaDB:
+```
+SQLALCHEMY_DATABASE_URI = 'mysql://username:password@127.0.0.1/db_name'
+```
+
+For Postgres:
+```
+SQLALCHEMY_DATABASE_URI = 'postgresql://powerdnsadmin:powerdnsadmin@127.0.0.1/powerdnsadmindb'
+```
+
+Open your web browser and go to `http://localhost:9191` to visit PowerDNS-Admin web interface. Register a user. The first user will be in the Administrator role. diff --git a/docs/wiki/configuration/basic_settings.md b/docs/wiki/configuration/basic_settings.md new file mode 100644 index 000000000..6a47d6a28 --- /dev/null +++ b/docs/wiki/configuration/basic_settings.md @@ -0,0 +1,17 @@ +### PowerDNSAdmin basic settings
+
+PowerDNSAdmin has many features and settings available to be turned either off or on.
+In these docs those settings will be explained.
+To find the settings in the dashboard go to settings>basic.
+
+allow_user_create_domain: This setting is used to allow users with the `user` role to create a domain, not possible by
+default.
+
+allow_user_remove_domain: Same as `allow_user_create_domain` but for removing a domain.
+
+allow_user_view_history: Allow a user with the role `user` to view and access the history.
+
+custom_history_header: This is a string type variable, when inputting a header name, if exists in the request it will
+be in the created_by column in the history, if empty or not mentioned will default to the api_key description.
+
+site_name: This will be the site name. 
diff --git a/docs/wiki/database-setup/README.md b/docs/wiki/database-setup/README.md new file mode 100644 index 000000000..e4e4d015d --- /dev/null +++ b/docs/wiki/database-setup/README.md @@ -0,0 +1,4 @@ +# Database setup guides + +- [MySQL / MariaDB](Setup-MySQL-or-MariaDB.md) +- [PostgreSQL](Setup-PostgreSQL.md) diff --git a/docs/wiki/database-setup/Setup-MySQL-or-MariaDB.md b/docs/wiki/database-setup/Setup-MySQL-or-MariaDB.md new file mode 100644 index 000000000..5242b69c3 --- /dev/null +++ b/docs/wiki/database-setup/Setup-MySQL-or-MariaDB.md @@ -0,0 +1,56 @@ +# Setup MySQL database for PowerDNS-Admin + +This guide will show you how to prepare a MySQL or MariaDB database for PowerDNS-Admin. + +We assume the database is installed per your platform's directions (apt, yum, etc). Directions to do this can be found below: +- MariaDB: + - https://mariadb.com/kb/en/getting-installing-and-upgrading-mariadb/ + - https://www.digitalocean.com/community/tutorials/how-to-install-mariadb-on-ubuntu-20-04 +- MySQL: + - https://dev.mysql.com/downloads/mysql/ + - https://www.digitalocean.com/community/tutorials/how-to-install-mysql-on-ubuntu-20-04 + +The following directions assume a default configuration and for productions setups `mysql_secure_installation` has been run. + +## Setup database: + +Connect to the database (Usually using `mysql -u root -p` if a password has been set on the root database user or `sudo mysql` if not), then enter the following: +``` +CREATE DATABASE `powerdnsadmin` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; +GRANT ALL PRIVILEGES ON `powerdnsadmin`.* TO 'pdnsadminuser'@'localhost' IDENTIFIED BY 'YOUR_PASSWORD_HERE'; +FLUSH PRIVILEGES; +``` +- If your database server is located on a different machine then change 'localhost' to '%' +- Replace YOUR_PASSWORD_HERE with a secure password. + +Once there are no errors you can type `quit` in the mysql shell to exit from it. 
+ +## Install required packages: +### Red-hat based systems: +``` +yum install MariaDB-shared mariadb-devel mysql-community-devel +``` + +### Debian based systems: +``` +apt install libmysqlclient-dev +``` + +### Install python packages: +``` +pip3 install mysqlclient==2.0.1 +``` + +## Known issues: + +Problem: If you plan to manage large zones, you may encounter some issues while applying changes. This is due to PowerDNS-Admin trying to insert the entire modified zone into the column history.detail. + +Using MySQL/MariaDB, this column is created by default as TEXT and thus limited to 65,535 characters. + +Solution: Convert the column to MEDIUMTEXT: +1. Connect to the database shell as described in the setup database section: +2. Execute the following commands: + ``` + USE powerdnsadmin; + ALTER TABLE history MODIFY detail MEDIUMTEXT; + ``` diff --git a/docs/wiki/database-setup/Setup-PostgreSQL.md b/docs/wiki/database-setup/Setup-PostgreSQL.md new file mode 100644 index 000000000..197aae523 --- /dev/null +++ b/docs/wiki/database-setup/Setup-PostgreSQL.md @@ -0,0 +1,79 @@ +# Setup Postgres database for PowerDNS-Admin + +This guide will show you how to prepare a PostgreSQL database for PowerDNS-Admin. + +We assume the database is installed per your platform's directions (apt, yum, etc). Directions to do this can be found below: + +- https://www.postgresql.org/download/ +- https://www.digitalocean.com/community/tutorials/how-to-install-postgresql-on-ubuntu-22-04-quickstart + +We assume a default configuration and only the postgres user existing. + +## Setup database +The below will create a database called powerdnsadmindb and a user of powerdnsadmin. 
+ +``` +$ sudo su - postgres +$ createuser powerdnsadmin +$ createdb -E UTF8 -l en_US.UTF-8 -O powerdnsadmin -T template0 powerdnsadmindb 'The database for PowerDNS-Admin' +$ psql +postgres=# ALTER ROLE powerdnsadmin WITH PASSWORD 'powerdnsadmin_password'; +``` + +Note: +- Please change the information above (db, user, password) to fit your setup. + +### Setup Remote access to database: +If your database is on a different server postgres does not allow remote connections by default. + +To change this follow the below directions: +``` +[root@host ~]$ sudo su - postgres +# Edit /var/lib/pgsql/data/postgresql.conf +# Change the following line: +listen_addresses = 'localhost' +# to: +listen_addresses = '*' +# Edit /var/lib/pgsql/data/pg_hba.conf +# Add the following lines to the end of the +host all all 0.0.0.0/0 md5 +host all all ::/0 md5 + +[postgres@host ~]$ exit +[root@host ~]$ sudo systemctl restart postgresql +``` + +On debian based systems these files are located in: +``` +/etc/postgresql//main/ +``` + +## Install required packages: +### Red-hat based systems: +TODO: confirm this is correct +``` +sudo yum install postgresql-libs +``` + +### Debian based systems: +``` +apt install python3-psycopg2 +``` + +## Known Issues: + +** To fill in ** + + +## Docker (TODO: to move to docker docs) +TODO: Setup a local Docker postgres database ready to go (should probably move to the top). 
+``` +docker run --name pdnsadmin-test -e BIND_ADDRESS=0.0.0.0 +-e SECRET_KEY='a-very-secret-key' +-e PORT='9191' +-e SQLA_DB_USER='powerdns_admin_user' +-e SQLA_DB_PASSWORD='exceptionallysecure' +-e SQLA_DB_HOST='192.168.0.100' +-e SQLA_DB_NAME='powerdns_admin_test' +-v /data/node_modules:/var/www/powerdns-admin/node_modules -d -p 9191:9191 ixpict/powerdns-admin-pgsql:latest +``` diff --git a/docs/wiki/debug/build-process.md b/docs/wiki/debug/build-process.md new file mode 100644 index 000000000..28f1cfede --- /dev/null +++ b/docs/wiki/debug/build-process.md @@ -0,0 +1,61 @@ +This discribes how to debug the buildprocess + + +docker-compose.yml + +``` +version: "3" +services: + app: + image: powerdns/custom + container_name: powerdns + restart: always + build: + context: git + dockerfile: docker/Dockerfile + network_mode: "host" + logging: + driver: json-file + options: + max-size: 50m + environment: + - BIND_ADDRESS=127.0.0.1:8082 + - SECRET_KEY='VerySecret' + - SQLALCHEMY_DATABASE_URI=mysql://pdnsadminuser:password@127.0.0.1/powerdnsadmin + - GUNICORN_TIMEOUT=60 + - GUNICORN_WORKERS=2 + - GUNICORN_LOGLEVEL=DEBUG + - OFFLINE_MODE=False + - CSRF_COOKIE_SECURE=False +``` + +Create a git folder in the location of the `docker-compose.yml` and clone the repo into it + +``` +mkdir git +cd git +git clone https://github.com/PowerDNS-Admin/PowerDNS-Admin.git . 
+``` + +In case you are behind an SSL Filter like me, you can add the following to each stage of the `git/docker/Dockerfile` + +This installs the command `update-ca-certificates` from the alpine repo and adds an ssl cert to the trust chain, make sure you are getting the right version in case the base image version changes + +``` +RUN mkdir /tmp-pkg && cd /tmp-pkg && wget http://dl-cdn.alpinelinux.org/alpine/v3.17/main/x86_64/ca-certificates-20220614-r4.apk && apk add --allow-untrusted --no-network --no-cache /tmp-pkg/ca-certificates-20220614-r4.apk || true +RUN rm -rf /tmp/pkg +COPY MyCustomCerts.crt /usr/local/share/ca-certificates/MyCustomCerts.crt +RUN update-ca-certificates +COPY pip.conf /etc/pip.conf +``` + +`MyCustomCerts.crt` and `pip.conf` have to be placed inside the `git` folder. + +The content of `pip.conf` is: + +``` +[global] +cert = /usr/local/share/ca-certificates/MyCustomCerts.crt +``` + +For easier debugging you can change the `CMD` of the `Dockerfile` to `CMD ["tail","-f", "/dev/null"]` though I expect you to be fluent in Docker in case you wish to debug \ No newline at end of file diff --git a/docs/wiki/features/DynDNS2.md b/docs/wiki/features/DynDNS2.md new file mode 100644 index 000000000..b59972719 --- /dev/null +++ b/docs/wiki/features/DynDNS2.md @@ -0,0 +1,16 @@ +Usage: +IPv4: http://user:pass@yournameserver.yoursite.tld/nic/update?hostname=record.domain.tld&myip=127.0.0.1 +IPv6: http://user:pass@yournameserver.yoursite.tld/nic/update?hostname=record.domain.tld&myip=::1 +Multiple IPs: http://user:pass@yournameserver.yoursite.tld/nic/update?hostname=record.domain.tld&myip=127.0.0.1,127.0.0.2,::1,::2 + +Notes: +- user needs to be a LOCAL user, not LDAP etc +- user must have already logged-in +- user needs to be added to Domain Access Control list of domain.tld - admin status (manage all) does not suffice +- record has to exist already - unless on-demand creation is allowed +- ipv4 address in myip field will change A record +- ipv6 address in 
myip field will change AAAA record +- use commas to separate multiple IP addresses in the myip field, mixing v4 & v6 is allowed + +DynDNS also works without authentication header (user:pass@) when already authenticated via session cookie from /login, even with external auth like LDAP. +However Domain Access Control restriction still applies. \ No newline at end of file diff --git a/docs/wiki/images/readme_screenshots/fullscreen-dashboard.png b/docs/wiki/images/readme_screenshots/fullscreen-dashboard.png new file mode 100644 index 000000000..828fc48e2 Binary files /dev/null and b/docs/wiki/images/readme_screenshots/fullscreen-dashboard.png differ diff --git a/docs/wiki/images/readme_screenshots/fullscreen-domaincreate.png b/docs/wiki/images/readme_screenshots/fullscreen-domaincreate.png new file mode 100644 index 000000000..ceb2d009d Binary files /dev/null and b/docs/wiki/images/readme_screenshots/fullscreen-domaincreate.png differ diff --git a/docs/wiki/images/readme_screenshots/fullscreen-domainmanage.png b/docs/wiki/images/readme_screenshots/fullscreen-domainmanage.png new file mode 100644 index 000000000..998bf9e4f Binary files /dev/null and b/docs/wiki/images/readme_screenshots/fullscreen-domainmanage.png differ diff --git a/docs/wiki/images/readme_screenshots/fullscreen-login.png b/docs/wiki/images/readme_screenshots/fullscreen-login.png new file mode 100644 index 000000000..4d95472c3 Binary files /dev/null and b/docs/wiki/images/readme_screenshots/fullscreen-login.png differ diff --git a/docs/wiki/images/webui/create.jpg b/docs/wiki/images/webui/create.jpg new file mode 100644 index 000000000..3707735be Binary files /dev/null and b/docs/wiki/images/webui/create.jpg differ diff --git a/docs/wiki/images/webui/index.jpg b/docs/wiki/images/webui/index.jpg new file mode 100644 index 000000000..33f0dedea Binary files /dev/null and b/docs/wiki/images/webui/index.jpg differ diff --git a/docs/wiki/images/webui/login.jpg b/docs/wiki/images/webui/login.jpg new file mode 
100644 index 000000000..08bb3c185 Binary files /dev/null and b/docs/wiki/images/webui/login.jpg differ diff --git a/docs/wiki/install/Architecture.png b/docs/wiki/install/Architecture.png new file mode 100644 index 000000000..04440be3d Binary files /dev/null and b/docs/wiki/install/Architecture.png differ diff --git a/docs/wiki/install/General.md b/docs/wiki/install/General.md new file mode 100644 index 000000000..f0823c851 --- /dev/null +++ b/docs/wiki/install/General.md @@ -0,0 +1,32 @@ +# General installation + +## PowerDNS-Admin Architecture + +![PowerDNS-Admin Component Layout](Architecture.png) + +A PowerDNS-Admin installation includes four main components: +- PowerDNS-Admin Database +- PowerDNS-Admin Application Server +- PowerDNS-Admin Frontend Web server +- PowerDNS server that + +All 3 components can be installed on one server or if your installation is large enough or for security reasons can be split across multiple servers. + +## Requirements for PowerDNS-Admin: +- A linux based system. Others (Arch-based for example) may work but are currently not tested. + - Ubuntu versions tested: + - To fill in + - Red hat versions tested: + - To fill in + - Python versions tested: + - 3.6 + - 3.7 + - 3.8 + - 3.9 + - 3.10 + - 3.11 - Failing due to issue with python3-saml later than 1.12.0 +- A database for PowerDNS-Admin, if you are using a database for PowerDNS itself this must be separate to that database. The currently supported databases are: + - MySQL + - PostgreSQL + - SQLite +- A PowerDNS server that PowerDNS-Admin will manage. diff --git a/docs/wiki/install/Running-PowerDNS-Admin-as-a-service-(Systemd).md b/docs/wiki/install/Running-PowerDNS-Admin-as-a-service-(Systemd).md new file mode 100644 index 000000000..f69373618 --- /dev/null +++ b/docs/wiki/install/Running-PowerDNS-Admin-as-a-service-(Systemd).md @@ -0,0 +1,72 @@ +*** +**WARNING** +This just uses the development server for testing purposes. 
For production environments you should probably go with a more robust solution, like [gunicorn](web-server/Running-PowerDNS-Admin-with-Systemd,-Gunicorn--and--Nginx.md) or a WSGI server. +*** + +### Following example shows a systemd unit file that can run PowerDNS-Admin + +You shouldn't run PowerDNS-Admin as _root_, so let's start of with the user/group creation that will later run PowerDNS-Admin: + +Create a new group for PowerDNS-Admin: + +> sudo groupadd powerdnsadmin + +Create a user for PowerDNS-Admin: + +> sudo useradd --system -g powerdnsadmin powerdnsadmin + +_`--system` creates a user without login-shell and password, suitable for running system services._ + +Create new systemd service file: + +> sudo vim /etc/systemd/system/powerdns-admin.service + +General example: +``` +[Unit] +Description=PowerDNS-Admin +After=network.target + +[Service] +Type=simple +User=powerdnsadmin +Group=powerdnsadmin +ExecStart=/opt/web/powerdns-admin/flask/bin/python ./run.py +WorkingDirectory=/opt/web/powerdns-admin +Restart=always + +[Install] +WantedBy=multi-user.target +``` + +Debian example: +``` +[Unit] +Description=PowerDNS-Admin +After=network.target + +[Service] +Type=simple +User=powerdnsadmin +Group=powerdnsadmin +Environment=PATH=/opt/web/powerdns-admin/flask/bin +ExecStart=/opt/web/powerdns-admin/flask/bin/python /opt/web/powerdns-admin/run.py +WorkingDirectory=/opt/web/powerdns-admin +Restart=always + +[Install] +WantedBy=multi-user.target +``` +Before starting the service, we need to make sure that the new user can work on the files in the PowerDNS-Admin folder: +> chown -R powerdnsadmin:powerdnsadmin /opt/web/powerdns-admin + +After saving the file, we need to reload the systemd daemon: +> sudo systemctl daemon-reload + +We can now try to start the service: +> sudo systemctl start powerdns-admin + +If you would like to start PowerDNS-Admin automagically at startup enable the service: +> systemctl enable powerdns-admin + +Should the service not be up by now, 
consult your syslog. Generally this will be a file permission issue, or python not finding it's modules. See the Debian unit example to see how you can use systemd in a python `virtualenv` \ No newline at end of file diff --git a/docs/wiki/install/Running-PowerDNS-Admin-on-Centos-7.md b/docs/wiki/install/Running-PowerDNS-Admin-on-Centos-7.md new file mode 100644 index 000000000..cee272c07 --- /dev/null +++ b/docs/wiki/install/Running-PowerDNS-Admin-on-Centos-7.md @@ -0,0 +1,83 @@ +# Installing PowerDNS-Admin on CentOS 7 + +``` +NOTE: If you are logged in as User and not root, add "sudo", or get root by sudo -i. +``` + +## Install required packages: +### Install needed repositories: + +``` +yum install epel-release +yum install https://repo.ius.io/ius-release-el7.rpm https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +``` + +### Install Python 3.6 and tools: +First remove python 3.4 if installed +``` +yum remove python34* +yum autoremove +``` + +``` +yum install python3 python3-devel python3-pip +pip3.6 install -U pip +pip install -U virtualenv +``` + +### Install required packages for building python libraries from requirements.txt file: +``` +yum install gcc openldap-devel xmlsec1-devel xmlsec1-openssl libtool-ltdl-devel +``` + +### Install yarn to build asset files + Nodejs 14: +``` +curl -sL https://rpm.nodesource.com/setup_14.x | bash - +curl -sL https://dl.yarnpkg.com/rpm/yarn.repo -o /etc/yum.repos.d/yarn.repo +yum install yarn +``` + +### Checkout source code and create virtualenv: +NOTE: Please adjust `/opt/web/powerdns-admin` to your local web application directory + +``` +git clone https://github.com/PowerDNS-Admin/PowerDNS-Admin.git /opt/web/powerdns-admin +cd /opt/web/powerdns-admin +virtualenv -p python3 flask +``` + +Activate your python3 environment and install libraries: +``` +. 
./flask/bin/activate
+pip install python-dotenv
+pip install -r requirements.txt
+```
+
+## Running PowerDNS-Admin:
+NOTE: The default config file is located at `./powerdnsadmin/default_config.py`. If you want to load another one, please set the `FLASK_CONF` environment variable. E.g.
+```bash
+export FLASK_CONF=../configs/development.py
+```
+
+### Create the database schema:
+```
+export FLASK_APP=powerdnsadmin/__init__.py
+flask db upgrade
+```
+
+**Also, we should generate asset files:**
+```
+yarn install --pure-lockfile
+flask assets build
+```
+
+**Now you can run PowerDNS-Admin by command:**
+```
+./run.py
+```
+
+Open your web browser and access `http://localhost:9191` to visit the PowerDNS-Admin web interface. Register a user. The first user will be in the Administrator role.
+
+At the first time you login into the PDA UI, you will be redirected to setting page to configure the PDNS API information.
+
+_**Note:**_ For production environments, I would recommend running PowerDNS-Admin with gunicorn or uwsgi instead of flask's built-in web server; take a look at the WIKI page to see how to configure them.
diff --git a/docs/wiki/install/Running-PowerDNS-Admin-on-Docker.md b/docs/wiki/install/Running-PowerDNS-Admin-on-Docker.md
new file mode 100644
index 000000000..1e3ef50c1
--- /dev/null
+++ b/docs/wiki/install/Running-PowerDNS-Admin-on-Docker.md
@@ -0,0 +1,14 @@
+# Installation on docker
+
+The Docker image is powerdnsadmin/pda-legacy available on [DockerHub](https://hub.docker.com/r/powerdnsadmin/pda-legacy)
+
+The supported environment variables to configure the container are located [here](../configuration/Environment-variables.md).
+ +You can run the container and expose the web server on port 9191 using: +```bash +docker run -d \ + -e SECRET_KEY='a-very-secret-key' \ + -v pda-data:/data \ + -p 9191:80 \ + powerdnsadmin/pda-legacy:latest +``` diff --git a/docs/wiki/install/Running-PowerDNS-Admin-on-Fedora-23.md b/docs/wiki/install/Running-PowerDNS-Admin-on-Fedora-23.md new file mode 100644 index 000000000..ca84460eb --- /dev/null +++ b/docs/wiki/install/Running-PowerDNS-Admin-on-Fedora-23.md @@ -0,0 +1 @@ +Please refer to CentOS guide: [Running-PowerDNS-Admin-on-Centos-7](Running-PowerDNS-Admin-on-Centos-7.md) \ No newline at end of file diff --git a/docs/wiki/install/Running-PowerDNS-Admin-on-Fedora-30.md b/docs/wiki/install/Running-PowerDNS-Admin-on-Fedora-30.md new file mode 100644 index 000000000..53f1c7cec --- /dev/null +++ b/docs/wiki/install/Running-PowerDNS-Admin-on-Fedora-30.md @@ -0,0 +1,82 @@ +``` +NOTE: If you are logged in as User and not root, add "sudo", or get root by sudo -i. + Normally under centos you are anyway mostly root. +``` +
+ +## Install required packages + +**Install Python and requirements** +```bash +dnf install python37 python3-devel python3-pip +``` +**Install Backend and Environment prerequisites** +```bash +dnf install mariadb-devel mariadb-common openldap-devel xmlsec1-devel xmlsec1-openssl libtool-ltdl-devel +``` +**Install Development tools** +```bash +dnf install gcc gc make +``` +**Install PIP** +```bash +pip3.7 install -U pip +``` +**Install Virtual Environment** +```bash +pip install -U virtualenv +``` +**Install Yarn for building NodeJS asset files:** +```bash +dnf install npm +npm install yarn -g +``` + +## Clone the PowerDNS-Admin repository to the installation path: +```bash +cd /opt/web/ +git clone https://github.com/PowerDNS-Admin/PowerDNS-Admin.git powerdns-admin +``` + +**Prepare the Virtual Environment:** +```bash +cd /opt/web/powerdns-admin +virtualenv -p python3 flask +``` +**Activate the Python Environment and install libraries** +```bash +. ./flask/bin/activate +pip install python-dotenv +pip install -r requirements.txt +``` + +## Running PowerDNS-Admin + +NOTE: The default config file is located at `./powerdnsadmin/default_config.py`. If you want to load another one, please set the `FLASK_CONF` environment variable. E.g. +```bash +export FLASK_CONF=../configs/development.py +``` + +**Then create the database schema by running:** +``` +(flask) [khanh@localhost powerdns-admin] export FLASK_APP=powerdnsadmin/__init__.py +(flask) [khanh@localhost powerdns-admin] flask db upgrade +``` + +**Also, we should generate asset files:** +``` +(flask) [khanh@localhost powerdns-admin] yarn install --pure-lockfile +(flask) [khanh@localhost powerdns-admin] flask assets build +``` + +**Now you can run PowerDNS-Admin by command:** +``` +(flask) [khanh@localhost powerdns-admin] ./run.py +``` + +Open your web browser and access to `http://localhost:9191` to visit PowerDNS-Admin web interface. Register an user. The first user will be in Administrator role. 
+ +At the first time you login into the PDA UI, you will be redirected to setting page to configure the PDNS API information. + +_**Note:**_ For production environment, i recommend to run PowerDNS-Admin with WSGI over Apache instead of flask's built-in web server... + Take a look at [WSGI Apache Example](web-server/WSGI-Apache-example#fedora) WIKI page to see how to configure it. \ No newline at end of file diff --git a/docs/wiki/install/Running-PowerDNS-Admin-on-Ubuntu-or-Debian.md b/docs/wiki/install/Running-PowerDNS-Admin-on-Ubuntu-or-Debian.md new file mode 100644 index 000000000..ad51c2b5d --- /dev/null +++ b/docs/wiki/install/Running-PowerDNS-Admin-on-Ubuntu-or-Debian.md @@ -0,0 +1,90 @@ +# Installing PowerDNS-Admin on Ubuntu or Debian based systems + +First setup your database accordingly: +[Database Setup](../database-setup/README.md) + +## Install required packages: + +### Install required packages for building python libraries from requirements.txt file + +For Debian 11 (bullseye) and above: +```bash +sudo apt install -y python3-dev git libsasl2-dev libldap2-dev python3-venv libmariadb-dev pkg-config build-essential curl libpq-dev +``` +Older systems might also need the following: +```bash +sudo apt install -y libssl-dev libxml2-dev libxslt1-dev libxmlsec1-dev libffi-dev apt-transport-https virtualenv +``` + +### Install NodeJs + +```bash +curl -sL https://deb.nodesource.com/setup_14.x | sudo bash - +sudo apt install -y nodejs +``` + +### Install yarn to build asset files +For Debian 11 (bullseye) and above: +```bash +curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | gpg --dearmor | sudo tee /usr/share/keyrings/yarnkey.gpg >/dev/null +echo "deb [signed-by=/usr/share/keyrings/yarnkey.gpg] https://dl.yarnpkg.com/debian stable main" | sudo tee /etc/apt/sources.list.d/yarn.list +sudo apt update && sudo apt install -y yarn +``` +For older Debian systems: +```bash +sudo curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - +echo "deb 
https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list +sudo apt update -y +sudo apt install -y yarn +``` + +### Checkout source code and create virtualenv +_**Note:**_ Please adjust `/opt/web/powerdns-admin` to your local web application directory + +```bash +git clone https://github.com/PowerDNS-Admin/PowerDNS-Admin.git /opt/web/powerdns-admin +cd /opt/web/powerdns-admin +python3 -mvenv ./venv +``` + +Activate your python3 environment and install libraries: + +```bash +source ./venv/bin/activate +pip install --upgrade pip +pip install -r requirements.txt +``` +## Running PowerDNS-Admin + +Create PowerDNS-Admin config file and make the changes necessary for your use case. Make sure to change `SECRET_KEY` to a long random string that you generated yourself ([see Flask docs](https://flask.palletsprojects.com/en/1.1.x/config/#SECRET_KEY)), do not use the pre-defined one. E.g.: + +```bash +cp /opt/web/powerdns-admin/configs/development.py /opt/web/powerdns-admin/configs/production.py +vim /opt/web/powerdns-admin/configs/production.py +export FLASK_CONF=../configs/production.py +``` + +Do the DB migration + +```bash +export FLASK_APP=powerdnsadmin/__init__.py +flask db upgrade +``` + +Then generate asset files + +```bash +yarn install --pure-lockfile +flask assets build +``` + +Now you can run PowerDNS-Admin by command + +```bash +./run.py +``` + +This is good for testing, but for production usage, you should use gunicorn or uwsgi. See [Running PowerDNS Admin with Systemd, Gunicorn and Nginx](../web-server/Running-PowerDNS-Admin-with-Systemd-Gunicorn-and-Nginx.md) for instructions. + + +From here you can now follow the [Getting started guide](../configuration/Getting-started.md). 
diff --git a/docs/wiki/install/Running-on-FreeBSD.md b/docs/wiki/install/Running-on-FreeBSD.md new file mode 100644 index 000000000..b37e9c3a1 --- /dev/null +++ b/docs/wiki/install/Running-on-FreeBSD.md @@ -0,0 +1,102 @@ +On [FreeBSD](https://www.freebsd.org/), most software is installed using `pkg`. You can always build from source with the Ports system. This method uses as many binary ports as possible, and builds some python packages from source. It installs all the required runtimes in the global system (e.g., python, node, yarn) and then builds a virtual python environment in `/opt/python`. Likewise, it installs powerdns-admin in `/opt/powerdns-admin`. + +### Build an area to host files + +```bash +mkdir -p /opt/python +``` + +### Install prerequisite runtimes: python, node, yarn + +```bash +sudo pkg install git python3 curl node12 yarn-node12 +sudo pkg install libxml2 libxslt pkgconf py37-xmlsec py37-cffi py37-ldap +``` + +## Check Out Source Code +_**Note:**_ Please adjust `/opt/powerdns-admin` to your local web application directory + +```bash +git clone https://github.com/PowerDNS-Admin/PowerDNS-Admin.git /opt/powerdns-admin +cd /opt/powerdns-admin +``` + +## Make Virtual Python Environment + +Make a virtual environment for python. Activate your python3 environment and install libraries. It's easier to install some python libraries as system packages, so we add the `--system-site-packages` option to pull those in. + +> Note: I couldn't get `python-ldap` to install correctly, and I don't need it. I commented out the `python-ldap` line in `requirements.txt` and it all built and installed correctly. If you don't intend to use LDAP authentication, you'll be fine. If you need LDAP authentication, it probably won't work. 
+
+```bash
+python3 -m venv /opt/python --system-site-packages
+source /opt/python/bin/activate
+/opt/python/bin/python3 -m pip install --upgrade pip wheel
+# this command comments out python-ldap
+perl -pi -e 's,^python-ldap,\# python-ldap,' requirements.txt
+pip3 install -r requirements.txt
+```
+
+## Configuring PowerDNS-Admin
+
+NOTE: The default config file is located at `./powerdnsadmin/default_config.py`. If you want to load another one, please set the `FLASK_CONF` environment variable. E.g.
+```bash
+cp configs/development.py /opt/powerdns-admin/production.py
+export FLASK_CONF=/opt/powerdns-admin/production.py
+```
+
+### Update the Flask config
+
+Edit your flask python configuration. Insert values for the database server, user name, password, etc.
+
+```bash
+vim $FLASK_CONF
+```
+
+Edit the values below to something sensible
+```python
+### BASIC APP CONFIG
+SALT = '[something]'
+SECRET_KEY = '[something]'
+BIND_ADDRESS = '0.0.0.0'
+PORT = 9191
+OFFLINE_MODE = False
+
+### DATABASE CONFIG
+SQLA_DB_USER = 'pda'
+SQLA_DB_PASSWORD = 'changeme'
+SQLA_DB_HOST = '127.0.0.1'
+SQLA_DB_NAME = 'pda'
+SQLALCHEMY_TRACK_MODIFICATIONS = True
+```
+
+Be sure to uncomment one of the lines like `SQLALCHEMY_DATABASE_URI`.
+
+### Initialise the database
+
+```bash
+export FLASK_APP=powerdnsadmin/__init__.py
+flask db upgrade
+```
+
+### Build web assets
+
+```bash
+yarn install --pure-lockfile
+flask assets build
+```
+
+## Running PowerDNS-Admin
+
+Now you can run PowerDNS-Admin by command
+
+```bash
+./run.py
+```
+
+Open your web browser and go to `http://localhost:9191` to visit PowerDNS-Admin web interface. Register a user. The first user will be in the Administrator role.
+
+### Running at startup
+
+This is good for testing, but for production usage, you should use gunicorn or uwsgi. See [Running PowerDNS Admin with Systemd, Gunicorn and Nginx](../web-server/Running-PowerDNS-Admin-with-Systemd-Gunicorn-and-Nginx.md) for instructions.
+ +The right approach long-term is to create a startup script in `/usr/local/etc/rc.d` and enable it through `/etc/rc.conf`. \ No newline at end of file diff --git a/docs/wiki/web-server/Running-Docker-Apache-Reverseproxy.md b/docs/wiki/web-server/Running-Docker-Apache-Reverseproxy.md new file mode 100644 index 000000000..e757ab7f9 --- /dev/null +++ b/docs/wiki/web-server/Running-Docker-Apache-Reverseproxy.md @@ -0,0 +1,73 @@ +This describes how to run Apache2 on the host system with a reverse proxy directing to the docker container + +This is usually used to add ssl certificates and prepend a subdirectory + +The network_mode host settings is not neccessary but used for ldap availability in this case + + +docker-compose.yml + +``` +version: "3" +services: + app: + image: powerdnsadmin/pda-legacy:latest + container_name: powerdns + restart: always + network_mode: "host" + logging: + driver: json-file + options: + max-size: 50m + environment: + - BIND_ADDRESS=127.0.0.1:8082 + - SECRET_KEY='NotVerySecret' + - SQLALCHEMY_DATABASE_URI=mysql://pdnsadminuser:password@127.0.0.1/powerdnsadmin + - GUNICORN_TIMEOUT=60 + - GUNICORN_WORKERS=2 + - GUNICORN_LOGLEVEL=DEBUG + - OFFLINE_MODE=False + - CSRF_COOKIE_SECURE=False + - SCRIPT_NAME=/powerdns +``` + +After running the Container create the static directory and populate + +``` +mkdir -p /var/www/powerdns +docker cp powerdns:/app/powerdnsadmin/static /var/www/powerdns/ +chown -R root:www-data /var/www/powerdns +``` + +Adjust the static reference, static/assets/css has a hardcoded reference + +``` +sed -i 's/\/static/\/powerdns\/static/' /var/www/powerdns/static/assets/css/* +``` + +Apache Config: + +You can set the SCRIPT_NAME environment using Apache as well, once is sufficient though + +``` + + RequestHeader set X-Forwarded-Proto "https" + RequestHeader set X-Forwarded-Port "443" + RequestHeader set SCRIPT_NAME "/powerdns" + ProxyPreserveHost On + + + ProxyPass /powerdns/static ! 
+ ProxyPass /powerdns http://127.0.0.1:8082/powerdns + ProxyPassReverse /powerdns http://127.0.0.1:8082/powerdns + + Alias /powerdns/static "/var/www/powerdns/static" + + + Options None + #Options +Indexes + AllowOverride None + Order allow,deny + Allow from all + +``` \ No newline at end of file diff --git a/docs/wiki/web-server/Running-PowerDNS-Admin-with-Systemd,-Gunicorn-and-Apache.md b/docs/wiki/web-server/Running-PowerDNS-Admin-with-Systemd,-Gunicorn-and-Apache.md new file mode 100644 index 000000000..a2d4fa2b4 --- /dev/null +++ b/docs/wiki/web-server/Running-PowerDNS-Admin-with-Systemd,-Gunicorn-and-Apache.md @@ -0,0 +1,97 @@ +Following is an example showing how to run PowerDNS-Admin with systemd, gunicorn and Apache: + +The systemd and gunicorn setup are the same as for with nginx. This set of configurations assumes you have installed your PowerDNS-Admin under /opt/powerdns-admin and are running with a package-installed gunicorn. + +## Configure systemd service + +`$ sudo vim /etc/systemd/system/powerdns-admin.service` + +``` +[Unit] +Description=PowerDNS web administration service +Requires=powerdns-admin.socket +Wants=network.target +After=network.target mysqld.service postgresql.service slapd.service mariadb.service + +[Service] +PIDFile=/run/powerdns-admin/pid +User=pdnsa +Group=pdnsa +WorkingDirectory=/opt/powerdns-admin +ExecStart=/usr/bin/gunicorn-3.6 --workers 4 --log-level info --pid /run/powerdns-admin/pid --bind unix:/run/powerdns-admin/socket "powerdnsadmin:create_app(config='config.py')" +ExecReload=/bin/kill -s HUP $MAINPID +ExecStop=/bin/kill -s TERM $MAINPID +PrivateTmp=true +Restart=on-failure +RestartSec=10 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target +``` + +`$ sudo vim /etc/systemd/system/powerdns-admin.socket` + +``` +[Unit] +Description=PowerDNS-Admin socket + +[Socket] +ListenStream=/run/powerdns-admin/socket + +[Install] +WantedBy=sockets.target +``` + +`$ sudo vim /etc/tmpfiles.d/powerdns-admin.conf` + +``` +d 
/run/powerdns-admin 0755 pdnsa pdnsa - +``` + +Then `sudo systemctl daemon-reload; sudo systemctl start powerdns-admin.socket; sudo systemctl enable powerdns-admin.socket` to start the Powerdns-Admin service and make it run on boot. + +## Sample Apache configuration + +This includes SSL redirect. + +``` + + ServerName dnsadmin.company.com + DocumentRoot "/opt/powerdns-admin" + + Options Indexes FollowSymLinks MultiViews + AllowOverride None + Require all granted + + Redirect permanent / https://dnsadmin.company.com/ + + + ServerName dnsadmin.company.com + DocumentRoot "/opt/powerdns-admin/powerdnsadmin" + ## Alias declarations for resources outside the DocumentRoot + Alias /static/ "/opt/powerdns-admin/powerdnsadmin/static/" + Alias /favicon.ico "/opt/powerdns-admin/powerdnsadmin/static/favicon.ico" + + AllowOverride None + Require all granted + + ## Proxy rules + ProxyRequests Off + ProxyPreserveHost On + ProxyPass /static/ ! + ProxyPass /favicon.ico ! + ProxyPass / unix:/var/run/powerdns-admin/socket|http://%{HTTP_HOST}/ + ProxyPassReverse / unix:/var/run/powerdns-admin/socket|http://%{HTTP_HOST}/ + ## SSL directives + SSLEngine on + SSLCertificateFile "/etc/pki/tls/certs/dnsadmin.company.com.crt" + SSLCertificateKeyFile "/etc/pki/tls/private/dnsadmin.company.com.key" + +``` + +## Notes +* The above assumes your installation is under /opt/powerdns-admin +* The hostname is assumed as dnsadmin.company.com +* gunicorn is installed in /usr/bin via a package (as in the case with CentOS/Redhat 7) and you have Python 3.6 installed. If you prefer to use flask then see the systemd configuration for nginx. 
+* On Ubuntu / Debian systems, you may need to enable the "proxy_http" module with `a2enmod proxy_http` diff --git a/docs/wiki/web-server/Running-PowerDNS-Admin-with-Systemd-Gunicorn-and-Nginx.md b/docs/wiki/web-server/Running-PowerDNS-Admin-with-Systemd-Gunicorn-and-Nginx.md new file mode 100644 index 000000000..57725bc16 --- /dev/null +++ b/docs/wiki/web-server/Running-PowerDNS-Admin-with-Systemd-Gunicorn-and-Nginx.md @@ -0,0 +1,181 @@ +Following is an example showing how to run PowerDNS-Admin with systemd, gunicorn and nginx: + +## Configure PowerDNS-Admin + +Create PowerDNS-Admin config file and make the changes necessary for your use case. Make sure to change `SECRET_KEY` to a long random string that you generated yourself ([see Flask docs](https://flask.palletsprojects.com/en/1.1.x/config/#SECRET_KEY)), do not use the pre-defined one. +``` +$ cp /opt/web/powerdns-admin/configs/development.py /opt/web/powerdns-admin/configs/production.py +$ vim /opt/web/powerdns-admin/configs/production.py +``` + +## Configure systemd service + +`$ sudo vim /etc/systemd/system/powerdns-admin.service` + +``` +[Unit] +Description=PowerDNS-Admin +Requires=powerdns-admin.socket +After=network.target + +[Service] +PIDFile=/run/powerdns-admin/pid +User=pdns +Group=pdns +WorkingDirectory=/opt/web/powerdns-admin +ExecStartPre=+mkdir -p /run/powerdns-admin/ +ExecStartPre=+chown pdns:pdns -R /run/powerdns-admin/ +ExecStart=/usr/local/bin/gunicorn --pid /run/powerdns-admin/pid --bind unix:/run/powerdns-admin/socket 'powerdnsadmin:create_app()' +ExecReload=/bin/kill -s HUP $MAINPID +ExecStop=/bin/kill -s TERM $MAINPID +PrivateTmp=true + +[Install] +WantedBy=multi-user.target +``` + +`$ sudo systemctl edit powerdns-admin.service` + +``` +[Service] +Environment="FLASK_CONF=../configs/production.py" +``` + +`$ sudo vim /etc/systemd/system/powerdns-admin.socket` + +``` +[Unit] +Description=PowerDNS-Admin socket + +[Socket] +ListenStream=/run/powerdns-admin/socket + +[Install] 
+WantedBy=sockets.target +``` + +`$ sudo vim /etc/tmpfiles.d/powerdns-admin.conf` + +``` +d /run/powerdns-admin 0755 pdns pdns - +``` + +Then `sudo systemctl daemon-reload; sudo systemctl start powerdns-admin.socket; sudo systemctl enable powerdns-admin.socket` to start the Powerdns-Admin service and make it run on boot. + +## Sample nginx configuration +``` +server { + listen *:80; + server_name powerdns-admin.local www.powerdns-admin.local; + + index index.html index.htm index.php; + root /opt/web/powerdns-admin; + access_log /var/log/nginx/powerdns-admin.local.access.log combined; + error_log /var/log/nginx/powerdns-admin.local.error.log; + + client_max_body_size 10m; + client_body_buffer_size 128k; + proxy_redirect off; + proxy_connect_timeout 90; + proxy_send_timeout 90; + proxy_read_timeout 90; + proxy_buffers 32 4k; + proxy_buffer_size 8k; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_headers_hash_bucket_size 64; + + location ~ ^/static/ { + include /etc/nginx/mime.types; + root /opt/web/powerdns-admin/powerdnsadmin; + + location ~* \.(jpg|jpeg|png|gif)$ { + expires 365d; + } + + location ~* ^.+.(css|js)$ { + expires 7d; + } + } + + location / { + proxy_pass http://unix:/run/powerdns-admin/socket; + proxy_read_timeout 120; + proxy_connect_timeout 120; + proxy_redirect off; + } + +} +``` + +
+Sample Nginx-Configuration for SSL + +* Im binding this config to every dns-name with default_server... +* but you can remove it and set your server_name. + +``` +server { + listen 80 default_server; + server_name ""; + return 301 https://$http_host$request_uri; +} + +server { + listen 443 ssl http2 default_server; + server_name _; + index index.html index.htm; + error_log /var/log/nginx/error_powerdnsadmin.log error; + access_log off; + + ssl_certificate path_to_your_fullchain_or_cert; + ssl_certificate_key path_to_your_key; + ssl_dhparam path_to_your_dhparam.pem; + ssl_prefer_server_ciphers on; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_session_cache shared:SSL:10m; + + client_max_body_size 10m; + client_body_buffer_size 128k; + proxy_redirect off; + proxy_connect_timeout 90; + proxy_send_timeout 90; + proxy_read_timeout 90; + proxy_buffers 32 4k; + proxy_buffer_size 8k; + proxy_set_header Host $http_host; + proxy_set_header X-Scheme $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_headers_hash_bucket_size 64; + + location ~ ^/static/ { + include mime.types; + root /opt/web/powerdns-admin/powerdnsadmin; + location ~* \.(jpg|jpeg|png|gif)$ { expires 365d; } + location ~* ^.+.(css|js)$ { expires 7d; } + } + + location ~ ^/upload/ { + include mime.types; + root /opt/web/powerdns-admin; + location ~* \.(jpg|jpeg|png|gif)$ { expires 365d; } + location ~* ^.+.(css|js)$ { expires 7d; } + } + + location / { + proxy_pass http://unix:/run/powerdns-admin/socket; + proxy_read_timeout 120; + proxy_connect_timeout 120; + proxy_redirect http:// $scheme://; + } +} +``` +
+
+## Note
+* `/opt/web/powerdns-admin` is the path to your powerdns-admin web directory
+* Make sure you have installed gunicorn in flask virtualenv already.
+* `powerdns-admin.local` is just an example of your web domain name.
\ No newline at end of file
diff --git a/docs/wiki/web-server/Supervisord-example.md b/docs/wiki/web-server/Supervisord-example.md
new file mode 100644
index 000000000..11cebc8b1
--- /dev/null
+++ b/docs/wiki/web-server/Supervisord-example.md
@@ -0,0 +1,18 @@
+Following is an example showing how to run PowerDNS-Admin with supervisord
+
+Create supervisord program config file
+```
+$ sudo vim /etc/supervisor.d/powerdnsadmin.conf
+```
+
+```
+[program:powerdnsadmin]
+command=/opt/web/powerdns-admin/flask/bin/python ./run.py
+stdout_logfile=/var/log/supervisor/program_powerdnsadmin.log
+stderr_logfile=/var/log/supervisor/program_powerdnsadmin.error
+autostart=true
+autorestart=true
+directory=/opt/web/powerdns-admin
+```
+
+Then `sudo supervisorctl start powerdnsadmin` to start the PowerDNS-Admin service.
\ No newline at end of file diff --git a/docs/wiki/web-server/Systemd-example.md b/docs/wiki/web-server/Systemd-example.md new file mode 100644 index 000000000..d7f738b39 --- /dev/null +++ b/docs/wiki/web-server/Systemd-example.md @@ -0,0 +1,50 @@ +## Configure systemd service + +This example uses package-installed gunicorn (instead of flask-installed) and PowerDNS-Admin installed under /opt/powerdns-admin + +`$ sudo vim /etc/systemd/system/powerdns-admin.service` + +``` +[Unit] +Description=PowerDNS web administration service +Requires=powerdns-admin.socket +Wants=network.target +After=network.target mysqld.service postgresql.service slapd.service mariadb.service + +[Service] +PIDFile=/run/powerdns-admin/pid +User=pdnsa +Group=pdnsa +WorkingDirectory=/opt/powerdns-admin +ExecStart=/usr/bin/gunicorn-3.6 --workers 4 --log-level info --pid /run/powerdns-admin/pid --bind unix:/run/powerdns-admin/socket "powerdnsadmin:create_app(config='config.py')" +ExecReload=/bin/kill -s HUP $MAINPID +ExecStop=/bin/kill -s TERM $MAINPID +PrivateTmp=true +Restart=on-failure +RestartSec=10 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target +``` + +`$ sudo vim /etc/systemd/system/powerdns-admin.socket` + +``` +[Unit] +Description=PowerDNS-Admin socket + +[Socket] +ListenStream=/run/powerdns-admin/socket + +[Install] +WantedBy=sockets.target +``` + +`$ sudo vim /etc/tmpfiles.d/powerdns-admin.conf` + +``` +d /run/powerdns-admin 0755 pdns pdns - +``` + +Then `sudo systemctl daemon-reload; sudo systemctl start powerdns-admin.socket; sudo systemctl enable powerdns-admin.socket` to start the Powerdns-Admin service and make it run on boot. diff --git a/docs/wiki/web-server/WSGI-Apache-example.md b/docs/wiki/web-server/WSGI-Apache-example.md new file mode 100644 index 000000000..d31e4f778 --- /dev/null +++ b/docs/wiki/web-server/WSGI-Apache-example.md @@ -0,0 +1,100 @@ +How to run PowerDNS-Admin via WSGI and Apache2.4 using mod_wsgi. 
+ +**Note**: You must install mod_wsgi by using pip3 instead of system default mod_wsgi!!! + +### Ubuntu/Debian +```shell +# apt install apache2-dev +# virtualenv -p python3 flask +# source ./flask/bin/activate +(flask) # pip3 install mod-wsgi +(flask) # mod_wsgi-express install-module > /etc/apache2/mods-available/wsgi.load +(flask) # a2enmod wsgi +(flask) # systemctl restart apache2 +``` +### CentOS +```shell +# yum install httpd-devel +# virtualenv -p python3 flask +# source ./flask/bin/activate +(flask) # pip3 install mod-wsgi +(flask) # mod_wsgi-express install-module > /etc/httpd/conf.modules.d/02-wsgi.conf +(flask) # systemctl restart httpd +``` +### Fedora +```bash +# Install Apache's Development interfaces and package requirements +dnf install httpd-devel gcc gc make +virtualenv -p python3 flask +source ./flask/bin/activate +# Install WSGI for HTTPD +pip install mod_wsgi-httpd +# Install WSGI +pip install mod-wsgi +# Enable the module in Apache: +mod_wsgi-express install-module > /etc/httpd/conf.modules.d/02-wsgi.conf +systemctl restart httpd +``` + +Apache vhost configuration; +```apache + + ServerName superawesomedns.foo.bar + ServerAlias [fe80::1] + ServerAdmin webmaster@foo.bar + + SSLEngine On + SSLCertificateFile /some/path/ssl/certs/cert.pem + SSLCertificateKeyFile /some/path/ssl/private/cert.key + + ErrorLog /var/log/apache2/error-superawesomedns.foo.bar.log + CustomLog /var/log/apache2/access-superawesomedns.foo.bar.log combined + + DocumentRoot /srv/vhosts/superawesomedns.foo.bar/ + + WSGIDaemonProcess pdnsadmin user=pdnsadmin group=pdnsadmin threads=5 + WSGIScriptAlias / /srv/vhosts/superawesomedns.foo.bar/powerdnsadmin.wsgi + + # pass BasicAuth on to the WSGI process + WSGIPassAuthorization On + + + WSGIProcessGroup pdnsadmin + WSGIApplicationGroup %{GLOBAL} + + AllowOverride None + Options +ExecCGI +FollowSymLinks + SSLRequireSSL + AllowOverride None + Require all granted + + +``` +**In Fedora, you might want to change the following line:** 
+```apache +WSGIDaemonProcess pdnsadmin socket-user=apache user=pdnsadmin group=pdnsadmin threads=5 +``` +**And you should add the following line to `/etc/httpd/conf/httpd.conf`:** +```apache +WSGISocketPrefix /var/run/wsgi +``` + +Content of `/srv/vhosts/superawesomedns.foo.bar/powerdnsadmin.wsgi`; +```python +#!/usr/bin/env python3 +import sys +sys.path.insert(0, '/srv/vhosts/superawesomedns.foo.bar') + +from app import app as application +``` +Starting from 0.2 version, the `powerdnsadmin.wsgi` file is slighty different : +```python +#!/usr/bin/env python3 +import sys +sys.path.insert(0, '/srv/vhosts/superawesomedns.foo.bar') + +from powerdnsadmin import create_app +application = create_app() +``` + +(this implies that the pdnsadmin user/group exists, and that you have mod_wsgi loaded) \ No newline at end of file diff --git a/docs/wiki/web-server/uWSGI-example.md b/docs/wiki/web-server/uWSGI-example.md new file mode 100644 index 000000000..db15d0386 --- /dev/null +++ b/docs/wiki/web-server/uWSGI-example.md @@ -0,0 +1,56 @@ +# uWSGI Example + +This guide will show you how to run PowerDNS-Admin via uWSGI and nginx. This guide was written using Debian 8 with the following software versions: +- nginx 1.6.2 +- uwsgi 2.0.7-debian +- python 2.7.9 + +## Software installation: + +1. apt install the following packages: + - `uwsgi` + - `uwsgi-plugin-python` + - `nginx` + +## Step-by-step instructions +1. 
Create a uWSGI .ini in `/etc/uwsgi/apps-enabled` with the following contents, making sure to replace the chdir, pythonpath and virtualenv directories with where you've installed PowerDNS-Admin: + ```ini + [uwsgi] + plugins = python27 + + uid=www-data + gid=www-data + + chdir = /opt/pdns-admin/PowerDNS-Admin/ + pythonpath = /opt/pdns-admin/PowerDNS-Admin/ + virtualenv = /opt/pdns-admin/PowerDNS-Admin/flask + + mount = /pdns=powerdnsadmin:create_app() + manage-script-name = true + + vacuum = true + harakiri = 20 + buffer-size = 32768 + post-buffering = 8192 + socket = /run/uwsgi/app/%n/%n.socket + chown-socket = www-data + pidfile = /run/uwsgi/app/%n/%n.pid + + daemonize = /var/log/uwsgi/app/%n.log + enable-threads + ``` +2. Add the following configuration to your nginx config: + ```nginx + location / { try_files $uri @pdns_admin; } + + location @pdns_admin { + include uwsgi_params; + uwsgi_pass unix:/run/uwsgi/app/pdns-admin/pdns-admin.socket; + } + + location /pdns/static/ { + alias /opt/pdns-admin/PowerDNS-Admin/app/static/; + } + ``` +3. Restart nginx and uwsgi. +4. You're done and PowerDNS-Admin will now be available via nginx. 
\ No newline at end of file diff --git a/migrations/env.py b/migrations/env.py index 6a10e6d55..4742e1418 100755 --- a/migrations/env.py +++ b/migrations/env.py @@ -19,7 +19,7 @@ # target_metadata = mymodel.Base.metadata from flask import current_app config.set_main_option('sqlalchemy.url', - current_app.config.get('SQLALCHEMY_DATABASE_URI')) + current_app.config.get('SQLALCHEMY_DATABASE_URI').replace("%","%%")) target_metadata = current_app.extensions['migrate'].db.metadata # other values from the config, defined by the needs of env.py, diff --git a/migrations/versions/1274ed462010_remove_all_settings_in_the_db.py b/migrations/versions/31a4ed468b18_remove_all_settings_in_the_db.py similarity index 100% rename from migrations/versions/1274ed462010_remove_all_settings_in_the_db.py rename to migrations/versions/31a4ed468b18_remove_all_settings_in_the_db.py diff --git a/migrations/versions/3f76448bb6de_add_user_confirmed_column.py b/migrations/versions/3f76448bb6de_add_user_confirmed_column.py index d1b6d2910..6dcf16af4 100644 --- a/migrations/versions/3f76448bb6de_add_user_confirmed_column.py +++ b/migrations/versions/3f76448bb6de_add_user_confirmed_column.py @@ -18,8 +18,12 @@ def upgrade(): with op.batch_alter_table('user') as batch_op: batch_op.add_column( - sa.Column('confirmed', sa.Boolean(), nullable=False, + sa.Column('confirmed', sa.Boolean(), nullable=True, default=False)) + with op.batch_alter_table('user') as batch_op: + user = sa.sql.table('user', sa.sql.column('confirmed')) + batch_op.execute(user.update().values(confirmed=False)) + batch_op.alter_column('confirmed', nullable=False, existing_type=sa.Boolean(), existing_nullable=True, existing_server_default=False) def downgrade(): diff --git a/migrations/versions/6ea7dc05f496_fix_typo_in_history_detail.py b/migrations/versions/6ea7dc05f496_fix_typo_in_history_detail.py new file mode 100644 index 000000000..6a039949b --- /dev/null +++ b/migrations/versions/6ea7dc05f496_fix_typo_in_history_detail.py @@ 
-0,0 +1,46 @@ +"""Fix typo in history detail + +Revision ID: 6ea7dc05f496 +Revises: fbc7cf864b24 +Create Date: 2022-05-10 10:16:58.784497 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '6ea7dc05f496' +down_revision = 'fbc7cf864b24' +branch_labels = None +depends_on = None + +history_table = sa.sql.table('history', + sa.Column('detail', sa.Text), + ) + + +def upgrade(): + op.execute( + history_table.update() + .where(history_table.c.detail.like('%"add_rrests":%')) + .values({ + 'detail': sa.func.replace( + sa.func.replace(history_table.c.detail, '"add_rrests":', '"add_rrsets":'), + '"del_rrests":', '"del_rrsets":' + ) + }) + ) + + +def downgrade(): + op.execute( + history_table.update() + .where(history_table.c.detail.like('%"add_rrsets":%')) + .values({ + 'detail': sa.func.replace( + sa.func.replace(history_table.c.detail, '"add_rrsets":', '"add_rrests":'), + '"del_rrsets":', '"del_rrests":' + ) + }) + ) diff --git a/migrations/versions/787bdba9e147_init_db.py b/migrations/versions/787bdba9e147_init_db.py index aa781de89..c4c7aa25e 100644 --- a/migrations/versions/787bdba9e147_init_db.py +++ b/migrations/versions/787bdba9e147_init_db.py @@ -56,9 +56,9 @@ def seed_data(): op.bulk_insert(template_table, [ - {id: 1, 'name': 'basic_template_1', 'description': 'Basic Template #1'}, - {id: 2, 'name': 'basic_template_2', 'description': 'Basic Template #2'}, - {id: 3, 'name': 'basic_template_3', 'description': 'Basic Template #3'} + {'id': 1, 'name': 'basic_template_1', 'description': 'Basic Template #1'}, + {'id': 2, 'name': 'basic_template_2', 'description': 'Basic Template #2'}, + {'id': 3, 'name': 'basic_template_3', 'description': 'Basic Template #3'} ] ) diff --git a/migrations/versions/b24bf17725d2_add_unique_index_to_settings_table_keys.py b/migrations/versions/b24bf17725d2_add_unique_index_to_settings_table_keys.py new file mode 100644 index 000000000..63ae3eb37 --- /dev/null +++ 
b/migrations/versions/b24bf17725d2_add_unique_index_to_settings_table_keys.py @@ -0,0 +1,24 @@ +"""Add unique index to settings table keys + +Revision ID: b24bf17725d2 +Revises: f41520e41cee +Create Date: 2023-02-18 00:00:00.000000 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'b24bf17725d2' +down_revision = 'f41520e41cee' +branch_labels = None +depends_on = None + + +def upgrade(): + op.create_index(op.f('ix_setting_name'), 'setting', ['name'], unique=True) + + +def downgrade(): + op.drop_index(op.f('ix_setting_name'), table_name='setting') diff --git a/migrations/versions/f41520e41cee_update_domain_type_length.py b/migrations/versions/f41520e41cee_update_domain_type_length.py new file mode 100644 index 000000000..f4672de83 --- /dev/null +++ b/migrations/versions/f41520e41cee_update_domain_type_length.py @@ -0,0 +1,31 @@ +"""update domain type length + +Revision ID: f41520e41cee +Revises: 6ea7dc05f496 +Create Date: 2023-01-10 11:56:28.538485 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = 'f41520e41cee' +down_revision = '6ea7dc05f496' +branch_labels = None +depends_on = None + + +def upgrade(): + with op.batch_alter_table('domain') as batch_op: + batch_op.alter_column('type', + existing_type=sa.String(length=6), + type_=sa.String(length=8)) + + +def downgrade(): + with op.batch_alter_table('domain') as batch_op: + batch_op.alter_column('type', + existing_type=sa.String(length=8), + type_=sa.String(length=6)) + diff --git a/package.json b/package.json index 76982c81c..1ceb82efa 100644 --- a/package.json +++ b/package.json @@ -1,15 +1,22 @@ { "dependencies": { - "admin-lte": "2.4.9", - "bootstrap": "^3.4.1", - "bootstrap-datepicker": "^1.8.0", + "@fortawesome/fontawesome-free": "6.3.0", + "admin-lte": "3.2.0", + "bootstrap": "4.6.2", + "bootstrap-datepicker": "^1.9.0", "bootstrap-validator": "^0.11.9", - "datatables.net-plugins": "^1.10.19", + "datatables.net-plugins": "^1.13.1", "icheck": "^1.0.2", "jquery-slimscroll": "^1.3.8", - "jquery-ui-dist": "^1.12.1", + "jquery-sparkline": "^2.4.0", + "jquery-ui-dist": "^1.13.2", "jquery.quicksearch": "^2.4.0", - "jtimeout": "^3.1.0", + "jquery-validation": "^1.19.5", + "jtimeout": "^3.2.0", + "knockout": "^3.5.1", "multiselect": "^0.9.12" + }, + "resolutions": { + "admin-lte/@fortawesome/fontawesome-free": "6.3.0" } } diff --git a/powerdnsadmin/__init__.py b/powerdnsadmin/__init__.py index c70b273d4..660f96b6e 100755 --- a/powerdnsadmin/__init__.py +++ b/powerdnsadmin/__init__.py @@ -1,15 +1,14 @@ import os import logging from flask import Flask -from flask_seasurf import SeaSurf from flask_mail import Mail from werkzeug.middleware.proxy_fix import ProxyFix from flask_session import Session - from .lib import utils def create_app(config=None): + from powerdnsadmin.lib.settings import AppSettings from . 
import models, routes, services from .assets import assets app = Flask(__name__) @@ -33,31 +32,6 @@ def create_app(config=None): # Proxy app.wsgi_app = ProxyFix(app.wsgi_app) - # CSRF protection - csrf = SeaSurf(app) - csrf.exempt(routes.index.dyndns_checkip) - csrf.exempt(routes.index.dyndns_update) - csrf.exempt(routes.index.saml_authorized) - csrf.exempt(routes.api.api_login_create_zone) - csrf.exempt(routes.api.api_login_delete_zone) - csrf.exempt(routes.api.api_generate_apikey) - csrf.exempt(routes.api.api_delete_apikey) - csrf.exempt(routes.api.api_update_apikey) - csrf.exempt(routes.api.api_zone_subpath_forward) - csrf.exempt(routes.api.api_zone_forward) - csrf.exempt(routes.api.api_create_zone) - csrf.exempt(routes.api.api_create_account) - csrf.exempt(routes.api.api_delete_account) - csrf.exempt(routes.api.api_update_account) - csrf.exempt(routes.api.api_create_user) - csrf.exempt(routes.api.api_delete_user) - csrf.exempt(routes.api.api_update_user) - csrf.exempt(routes.api.api_list_account_users) - csrf.exempt(routes.api.api_add_account_user) - csrf.exempt(routes.api.api_remove_account_user) - csrf.exempt(routes.api.api_zone_cryptokeys) - csrf.exempt(routes.api.api_zone_cryptokey) - # Load config from env variables if using docker if os.path.exists(os.path.join(app.root_path, 'docker_config.py')): app.config.from_object('powerdnsadmin.docker_config') @@ -69,23 +43,31 @@ def create_app(config=None): if 'FLASK_CONF' in os.environ: app.config.from_envvar('FLASK_CONF') - # Load app sepecified configuration + # Load app specified configuration if config is not None: if isinstance(config, dict): app.config.update(config) elif config.endswith('.py'): app.config.from_pyfile(config) + # Load any settings defined with environment variables + AppSettings.load_environment(app) + # HSTS if app.config.get('HSTS_ENABLED'): from flask_sslify import SSLify _sslify = SSLify(app) # lgtm [py/unused-local-variable] # Load Flask-Session - if 
app.config.get('FILESYSTEM_SESSIONS_ENABLED'): - app.config['SESSION_TYPE'] = 'filesystem' - sess = Session() - sess.init_app(app) + app.config['SESSION_TYPE'] = app.config.get('SESSION_TYPE') + if 'SESSION_TYPE' in os.environ: + app.config['SESSION_TYPE'] = os.environ.get('SESSION_TYPE') + + sess = Session(app) + + # create sessions table if using sqlalchemy backend + if os.environ.get('SESSION_TYPE') == 'sqlalchemy': + sess.app.session_interface.db.create_all() # SMTP app.mail = Mail(app) @@ -100,13 +82,12 @@ def create_app(config=None): app.jinja_env.filters['display_record_name'] = utils.display_record_name app.jinja_env.filters['display_master_name'] = utils.display_master_name app.jinja_env.filters['display_second_to_time'] = utils.display_time - app.jinja_env.filters[ - 'email_to_gravatar_url'] = utils.email_to_gravatar_url - app.jinja_env.filters[ - 'display_setting_state'] = utils.display_setting_state + app.jinja_env.filters['display_setting_state'] = utils.display_setting_state app.jinja_env.filters['pretty_domain_name'] = utils.pretty_domain_name + app.jinja_env.filters['format_datetime_local'] = utils.format_datetime + app.jinja_env.filters['format_zone_type'] = utils.format_zone_type - # Register context proccessors + # Register context processors from .models.setting import Setting @app.context_processor @@ -119,9 +100,4 @@ def inject_setting(): setting = Setting() return dict(SETTING=setting) - @app.context_processor - def inject_mode(): - setting = app.config.get('OFFLINE_MODE', False) - return dict(OFFLINE_MODE=setting) - return app diff --git a/powerdnsadmin/assets.py b/powerdnsadmin/assets.py index e7c6354a0..d46d43113 100644 --- a/powerdnsadmin/assets.py +++ b/powerdnsadmin/assets.py @@ -4,65 +4,65 @@ class ConcatFilter(Filter): """ Filter that merges files, placing a semicolon between them. - - Fixes issues caused by missing semicolons at end of JS assets, for example - with last statement of jquery.pjax.js. 
""" def concat(self, out, hunks, **kw): out.write(';'.join([h.data() for h, info in hunks])) -css_login = Bundle('node_modules/bootstrap/dist/css/bootstrap.css', - 'node_modules/font-awesome/css/font-awesome.css', - 'node_modules/ionicons/dist/css/ionicons.css', - 'node_modules/icheck/skins/square/blue.css', - 'node_modules/admin-lte/dist/css/AdminLTE.css', - filters=('cssmin', 'cssrewrite'), - output='generated/login.css') +css_login = Bundle( + 'node_modules/@fortawesome/fontawesome-free/css/all.css', + 'node_modules/icheck/skins/square/blue.css', + 'node_modules/admin-lte/dist/css/adminlte.css', + filters=('rcssmin', 'cssrewrite'), + output='generated/login.css') -js_login = Bundle('node_modules/jquery/dist/jquery.js', - 'node_modules/bootstrap/dist/js/bootstrap.js', - 'node_modules/icheck/icheck.js', - 'custom/js/custom.js', - filters=(ConcatFilter, 'jsmin'), - output='generated/login.js') +js_login = Bundle( + 'node_modules/jquery/dist/jquery.js', + 'node_modules/bootstrap/dist/js/bootstrap.js', + 'node_modules/icheck/icheck.js', + 'node_modules/knockout/build/output/knockout-latest.js', + 'custom/js/custom.js', + filters=(ConcatFilter, 'rjsmin'), + output='generated/login.js') -js_validation = Bundle('node_modules/bootstrap-validator/dist/validator.js', - output='generated/validation.js') +js_validation = Bundle( + 'node_modules/bootstrap-validator/dist/validator.js', + output='generated/validation.js') css_main = Bundle( - 'node_modules/bootstrap/dist/css/bootstrap.css', - 'node_modules/font-awesome/css/font-awesome.css', - 'node_modules/ionicons/dist/css/ionicons.css', - 'node_modules/datatables.net-bs/css/dataTables.bootstrap.css', + 'node_modules/@fortawesome/fontawesome-free/css/all.css', + 'node_modules/datatables.net-bs4/css/dataTables.bootstrap4.css', 'node_modules/icheck/skins/square/blue.css', 'node_modules/multiselect/css/multi-select.css', - 'node_modules/admin-lte/dist/css/AdminLTE.css', - 'node_modules/admin-lte/dist/css/skins/_all-skins.css', + 
'node_modules/admin-lte/dist/css/adminlte.css', 'custom/css/custom.css', 'node_modules/bootstrap-datepicker/dist/css/bootstrap-datepicker.css', - filters=('cssmin', 'cssrewrite'), + filters=('rcssmin', 'cssrewrite'), output='generated/main.css') -js_main = Bundle('node_modules/jquery/dist/jquery.js', - 'node_modules/jquery-ui-dist/jquery-ui.js', - 'node_modules/bootstrap/dist/js/bootstrap.js', - 'node_modules/datatables.net/js/jquery.dataTables.js', - 'node_modules/datatables.net-bs/js/dataTables.bootstrap.js', - 'node_modules/jquery-sparkline/jquery.sparkline.js', - 'node_modules/jquery-slimscroll/jquery.slimscroll.js', - 'node_modules/icheck/icheck.js', - 'node_modules/fastclick/lib/fastclick.js', - 'node_modules/moment/moment.js', - 'node_modules/admin-lte/dist/js/adminlte.js', - 'node_modules/multiselect/js/jquery.multi-select.js', - 'node_modules/datatables.net-plugins/sorting/natural.js', - 'node_modules/jtimeout/src/jTimeout.js', - 'node_modules/jquery.quicksearch/src/jquery.quicksearch.js', - 'custom/js/custom.js', - 'node_modules/bootstrap-datepicker/dist/js/bootstrap-datepicker.js', - filters=(ConcatFilter, 'jsmin'), - output='generated/main.js') +js_main = Bundle( + 'node_modules/jquery/dist/jquery.js', + 'node_modules/jquery-ui-dist/jquery-ui.js', + 'node_modules/bootstrap/dist/js/bootstrap.bundle.js', + 'node_modules/datatables.net/js/jquery.dataTables.js', + 'node_modules/datatables.net-bs4/js/dataTables.bootstrap4.js', + 'node_modules/jquery-sparkline/jquery.sparkline.js', + 'node_modules/jquery-slimscroll/jquery.slimscroll.js', + 'node_modules/jquery-validation/dist/jquery.validate.js', + 'node_modules/icheck/icheck.js', + 'node_modules/fastclick/lib/fastclick.js', + 'node_modules/moment/moment.js', + 'node_modules/admin-lte/dist/js/adminlte.js', + 'node_modules/multiselect/js/jquery.multi-select.js', + 'node_modules/datatables.net-plugins/sorting/natural.js', + 'node_modules/jtimeout/src/jTimeout.js', + 
'node_modules/jquery.quicksearch/src/jquery.quicksearch.js', + 'node_modules/knockout/build/output/knockout-latest.js', + 'custom/js/app-authentication-settings-editor.js', + 'custom/js/custom.js', + 'node_modules/bootstrap-datepicker/dist/js/bootstrap-datepicker.js', + filters=(ConcatFilter, 'rjsmin'), + output='generated/main.js') assets = Environment() assets.register('js_login', js_login) diff --git a/powerdnsadmin/decorators.py b/powerdnsadmin/decorators.py index df1e348c8..560ca45bf 100644 --- a/powerdnsadmin/decorators.py +++ b/powerdnsadmin/decorators.py @@ -5,13 +5,15 @@ from flask_login import current_user from .models import User, ApiKey, Setting, Domain, Setting -from .lib.errors import RequestIsNotJSON, NotEnoughPrivileges -from .lib.errors import DomainAccessForbidden +from .lib.errors import RequestIsNotJSON, NotEnoughPrivileges, RecordTTLNotAllowed, RecordTypeNotAllowed +from .lib.errors import DomainAccessForbidden, DomainOverrideForbidden + def admin_role_required(f): """ Grant access if user is in Administrator role """ + @wraps(f) def decorated_function(*args, **kwargs): if current_user.role.name != 'Administrator': @@ -25,6 +27,7 @@ def operator_role_required(f): """ Grant access if user is in Operator role or higher """ + @wraps(f) def decorated_function(*args, **kwargs): if current_user.role.name not in ['Administrator', 'Operator']: @@ -38,6 +41,7 @@ def history_access_required(f): """ Grant access if user is in Operator role or higher, or Users can view history """ + @wraps(f) def decorated_function(*args, **kwargs): if current_user.role.name not in [ @@ -56,6 +60,7 @@ def can_access_domain(f): - user is in granted Account, or - user is in granted Domain """ + @wraps(f) def decorated_function(*args, **kwargs): if current_user.role.name not in ['Administrator', 'Operator']: @@ -82,10 +87,11 @@ def can_configure_dnssec(f): - user is in Operator role or higher, or - dnssec_admins_only is off """ + @wraps(f) def decorated_function(*args, 
**kwargs): if current_user.role.name not in [ - 'Administrator', 'Operator' + 'Administrator', 'Operator' ] and Setting().get('dnssec_admins_only'): abort(403) @@ -93,16 +99,18 @@ def decorated_function(*args, **kwargs): return decorated_function + def can_remove_domain(f): """ Grant access if: - user is in Operator role or higher, or - allow_user_remove_domain is on """ + @wraps(f) def decorated_function(*args, **kwargs): if current_user.role.name not in [ - 'Administrator', 'Operator' + 'Administrator', 'Operator' ] and not Setting().get('allow_user_remove_domain'): abort(403) return f(*args, **kwargs) @@ -110,17 +118,17 @@ def decorated_function(*args, **kwargs): return decorated_function - def can_create_domain(f): """ Grant access if: - user is in Operator role or higher, or - allow_user_create_domain is on """ + @wraps(f) def decorated_function(*args, **kwargs): if current_user.role.name not in [ - 'Administrator', 'Operator' + 'Administrator', 'Operator' ] and not Setting().get('allow_user_create_domain'): abort(403) return f(*args, **kwargs) @@ -132,48 +140,62 @@ def api_basic_auth(f): @wraps(f) def decorated_function(*args, **kwargs): auth_header = request.headers.get('Authorization') - if auth_header: - auth_header = auth_header.replace('Basic ', '', 1) - try: - auth_header = str(base64.b64decode(auth_header), 'utf-8') - username, password = auth_header.split(":") - except binascii.Error as e: - current_app.logger.error( - 'Invalid base64-encoded of credential. 
Error {0}'.format( - e)) - abort(401) - except TypeError as e: - current_app.logger.error('Error: {0}'.format(e)) + if not auth_header: + current_app.logger.error('Error: Authorization header missing!') + abort(401) + + if auth_header[:6] != "Basic ": + current_app.logger.error('Error: Unsupported authorization mechanism!') + abort(401) + + # Remove "Basic " from the header value + auth_header = auth_header[6:] + auth_components = [] + + try: + auth_header = str(base64.b64decode(auth_header), 'utf-8') + # NK: We use auth_components here as we don't know if we'll have a colon, + # we split it maximum 1 times to grab the username, the rest of the string would be the password. + auth_components = auth_header.split(':', maxsplit=1) + except (binascii.Error, UnicodeDecodeError) as e: + current_app.logger.error( + 'Invalid base64-encoded of credential. Error {0}'.format( + e)) + abort(401) + except TypeError as e: + current_app.logger.error('Error: {0}'.format(e)) + abort(401) + + # If we don't have two auth components (username, password), we can abort + if len(auth_components) != 2: + abort(401) + + (username, password) = auth_components + + user = User(username=username, + password=password, + plain_text_password=password) + + try: + if Setting().get('verify_user_email') and user.email and not user.confirmed: + current_app.logger.warning( + 'Basic authentication failed for user {} because of unverified email address' + .format(username)) abort(401) - user = User(username=username, - password=password, - plain_text_password=password) + auth_method = request.args.get('auth_method', 'LOCAL') + auth_method = 'LDAP' if auth_method != 'LOCAL' else 'LOCAL' + auth = user.is_validate(method=auth_method, src_ip=request.remote_addr) - try: - if Setting().get('verify_user_email') and user.email and not user.confirmed: - current_app.logger.warning( - 'Basic authentication failed for user {} because of unverified email address' - .format(username)) - abort(401) - - auth_method = 
request.args.get('auth_method', 'LOCAL') - auth_method = 'LDAP' if auth_method != 'LOCAL' else 'LOCAL' - auth = user.is_validate(method=auth_method, - src_ip=request.remote_addr) - - if not auth: - current_app.logger.error('Checking user password failed') - abort(401) - else: - user = User.query.filter(User.username == username).first() - current_user = user # lgtm [py/unused-local-variable] - except Exception as e: - current_app.logger.error('Error: {0}'.format(e)) + if not auth: + current_app.logger.error('Checking user password failed') abort(401) - else: - current_app.logger.error('Error: Authorization header missing!') + else: + user = User.query.filter(User.username == username).first() + current_user = user # lgtm [py/unused-local-variable] + except Exception as e: + current_app.logger.error('Error: {0}'.format(e)) abort(401) return f(*args, **kwargs) @@ -197,16 +219,19 @@ def callback_if_request_body_contains_key(callback, http_methods=[], keys=[]): If request body contains one or more of specified keys, call :param callback """ + def decorator(f): @wraps(f) def decorated_function(*args, **kwargs): check_current_http_method = not http_methods or request.method in http_methods if (check_current_http_method and - set(request.get_json(force=True).keys()).intersection(set(keys)) + set(request.get_json(force=True).keys()).intersection(set(keys)) ): callback(*args, **kwargs) return f(*args, **kwargs) + return decorated_function + return decorator @@ -232,16 +257,18 @@ def decorated_function(*args, **kwargs): except: username = None if ( - (current_user.role.name in roles) or - (allow_self and user_id and current_user.id == user_id) or - (allow_self and username and current_user.username == username) + (current_user.role.name in roles) or + (allow_self and user_id and current_user.id == user_id) or + (allow_self and username and current_user.username == username) ): return f(*args, **kwargs) msg = ( "User {} with role {} does not have enough privileges to {}" 
).format(current_user.username, current_user.role.name, action) raise NotEnoughPrivileges(message=msg) + return decorated_function + return decorator @@ -251,14 +278,22 @@ def api_can_create_domain(f): - user is in Operator role or higher, or - allow_user_create_domain is on """ + @wraps(f) def decorated_function(*args, **kwargs): if current_user.role.name not in [ - 'Administrator', 'Operator' + 'Administrator', 'Operator' ] and not Setting().get('allow_user_create_domain'): - msg = "User {0} does not have enough privileges to create domain" + msg = "User {0} does not have enough privileges to create zone" current_app.logger.error(msg.format(current_user.username)) raise NotEnoughPrivileges() + + if Setting().get('deny_domain_override'): + req = request.get_json(force=True) + domain = Domain() + if req['name'] and domain.is_overriding(req['name']): + raise DomainOverrideForbidden() + return f(*args, **kwargs) return decorated_function @@ -269,15 +304,26 @@ def apikey_can_create_domain(f): Grant access if: - user is in Operator role or higher, or - allow_user_create_domain is on + and + - deny_domain_override is off or + - override_domain is true (from request) """ + @wraps(f) def decorated_function(*args, **kwargs): if g.apikey.role.name not in [ - 'Administrator', 'Operator' + 'Administrator', 'Operator' ] and not Setting().get('allow_user_create_domain'): - msg = "ApiKey #{0} does not have enough privileges to create domain" + msg = "ApiKey #{0} does not have enough privileges to create zone" current_app.logger.error(msg.format(g.apikey.id)) raise NotEnoughPrivileges() + + if Setting().get('deny_domain_override'): + req = request.get_json(force=True) + domain = Domain() + if req['name'] and domain.is_overriding(req['name']): + raise DomainOverrideForbidden() + return f(*args, **kwargs) return decorated_function @@ -289,20 +335,23 @@ def apikey_can_remove_domain(http_methods=[]): - user is in Operator role or higher, or - allow_user_remove_domain is on """ + def 
decorator(f): @wraps(f) def decorated_function(*args, **kwargs): check_current_http_method = not http_methods or request.method in http_methods if (check_current_http_method and - g.apikey.role.name not in ['Administrator', 'Operator'] and - not Setting().get('allow_user_remove_domain') + g.apikey.role.name not in ['Administrator', 'Operator'] and + not Setting().get('allow_user_remove_domain') ): - msg = "ApiKey #{0} does not have enough privileges to remove domain" + msg = "ApiKey #{0} does not have enough privileges to remove zone" current_app.logger.error(msg.format(g.apikey.id)) raise NotEnoughPrivileges() return f(*args, **kwargs) + return decorated_function + return decorator @@ -310,10 +359,11 @@ def apikey_is_admin(f): """ Grant access if user is in Administrator role """ + @wraps(f) def decorated_function(*args, **kwargs): if g.apikey.role.name != 'Administrator': - msg = "Apikey {0} does not have enough privileges to create domain" + msg = "Apikey {0} does not have enough privileges to create zone" current_app.logger.error(msg.format(g.apikey.id)) raise NotEnoughPrivileges() return f(*args, **kwargs) @@ -327,6 +377,7 @@ def apikey_can_access_domain(f): - user has Operator role or higher, or - user has explicitly been granted access to domain """ + @wraps(f) def decorated_function(*args, **kwargs): if g.apikey.role.name not in ['Administrator', 'Operator']: @@ -351,23 +402,82 @@ def apikey_can_configure_dnssec(http_methods=[]): - user is in Operator role or higher, or - dnssec_admins_only is off """ + def decorator(f=None): @wraps(f) def decorated_function(*args, **kwargs): check_current_http_method = not http_methods or request.method in http_methods if (check_current_http_method and - g.apikey.role.name not in ['Administrator', 'Operator'] and - Setting().get('dnssec_admins_only') + g.apikey.role.name not in ['Administrator', 'Operator'] and + Setting().get('dnssec_admins_only') ): msg = "ApiKey #{0} does not have enough privileges to configure dnssec" 
current_app.logger.error(msg.format(g.apikey.id)) raise DomainAccessForbidden(message=msg) return f(*args, **kwargs) if f else None + return decorated_function + return decorator +def allowed_record_types(f): + @wraps(f) + def decorated_function(*args, **kwargs): + if request.method in ['GET', 'DELETE', 'PUT']: + return f(*args, **kwargs) + + if g.apikey.role.name in ['Administrator', 'Operator']: + return f(*args, **kwargs) + + records_allowed_to_edit = Setting().get_records_allow_to_edit() + content = request.get_json() + try: + for record in content['rrsets']: + if 'type' not in record: + raise RecordTypeNotAllowed() + + if record['type'] not in records_allowed_to_edit: + current_app.logger.error(f"Error: Record type not allowed: {record['type']}") + raise RecordTypeNotAllowed(message=f"Record type not allowed: {record['type']}") + except (TypeError, KeyError) as e: + raise e + return f(*args, **kwargs) + + return decorated_function + + +def allowed_record_ttl(f): + @wraps(f) + def decorated_function(*args, **kwargs): + if not Setting().get('enforce_api_ttl'): + return f(*args, **kwargs) + + if request.method == 'GET': + return f(*args, **kwargs) + + if g.apikey.role.name in ['Administrator', 'Operator']: + return f(*args, **kwargs) + + allowed_ttls = Setting().get_ttl_options() + allowed_numeric_ttls = [ttl[0] for ttl in allowed_ttls] + content = request.get_json() + try: + for record in content['rrsets']: + if 'ttl' not in record: + raise RecordTTLNotAllowed() + + if record['ttl'] not in allowed_numeric_ttls: + current_app.logger.error(f"Error: Record TTL not allowed: {record['ttl']}") + raise RecordTTLNotAllowed(message=f"Record TTL not allowed: {record['ttl']}") + except (TypeError, KeyError) as e: + raise e + return f(*args, **kwargs) + + return decorated_function + + def apikey_auth(f): @wraps(f) def decorated_function(*args, **kwargs): @@ -375,10 +485,8 @@ def decorated_function(*args, **kwargs): if auth_header: try: apikey_val = 
str(base64.b64decode(auth_header), 'utf-8') - except binascii.Error as e: - current_app.logger.error( - 'Invalid base64-encoded of credential. Error {0}'.format( - e)) + except (binascii.Error, UnicodeDecodeError) as e: + current_app.logger.error('Invalid base64-encoded X-API-KEY. Error {0}'.format(e)) abort(401) except TypeError as e: current_app.logger.error('Error: {0}'.format(e)) @@ -414,6 +522,7 @@ def decorated_function(*args, **kwargs): return decorated_function + def apikey_or_basic_auth(f): @wraps(f) def decorated_function(*args, **kwargs): @@ -422,4 +531,5 @@ def decorated_function(*args, **kwargs): return apikey_auth(f)(*args, **kwargs) else: return api_basic_auth(f)(*args, **kwargs) + return decorated_function diff --git a/powerdnsadmin/default_config.py b/powerdnsadmin/default_config.py index 16b816185..ff192b0cf 100644 --- a/powerdnsadmin/default_config.py +++ b/powerdnsadmin/default_config.py @@ -1,34 +1,32 @@ import os -import urllib.parse + basedir = os.path.abspath(os.path.dirname(__file__)) -### BASIC APP CONFIG -SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu' -SECRET_KEY = 'e951e5a1f4b94151b360f47edf596dd2' BIND_ADDRESS = '0.0.0.0' -PORT = 9191 +CAPTCHA_ENABLE = True +CAPTCHA_HEIGHT = 60 +CAPTCHA_LENGTH = 6 +CAPTCHA_SESSION_KEY = 'captcha_image' +CAPTCHA_WIDTH = 160 +CSRF_COOKIE_HTTPONLY = True HSTS_ENABLED = False -OFFLINE_MODE = False -FILESYSTEM_SESSIONS_ENABLED = False - -### DATABASE CONFIG -SQLA_DB_USER = 'pda' -SQLA_DB_PASSWORD = 'changeme' -SQLA_DB_HOST = '127.0.0.1' -SQLA_DB_NAME = 'pda' -SQLALCHEMY_TRACK_MODIFICATIONS = True - -### DATABASE - MySQL -SQLALCHEMY_DATABASE_URI = 'mysql://{}:{}@{}/{}'.format( - urllib.parse.quote_plus(SQLA_DB_USER), - urllib.parse.quote_plus(SQLA_DB_PASSWORD), - SQLA_DB_HOST, - SQLA_DB_NAME -) - -### DATABASE - SQLite -# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db') - -# SAML Authnetication -SAML_ENABLED = False +PORT = 9191 +SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu' SAML_ASSERTION_ENCRYPTED 
= True +SAML_ENABLED = False +SECRET_KEY = 'e951e5a1f4b94151b360f47edf596dd2' +SERVER_EXTERNAL_SSL = os.getenv('SERVER_EXTERNAL_SSL', True) +SESSION_COOKIE_SAMESITE = 'Lax' +SESSION_TYPE = 'sqlalchemy' +SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db') +SQLALCHEMY_TRACK_MODIFICATIONS = True +# SQLA_DB_USER = 'pda' +# SQLA_DB_PASSWORD = 'changeme' +# SQLA_DB_HOST = '127.0.0.1' +# SQLA_DB_NAME = 'pda' +# SQLALCHEMY_DATABASE_URI = 'mysql://{}:{}@{}/{}'.format( +# urllib.parse.quote_plus(SQLA_DB_USER), +# urllib.parse.quote_plus(SQLA_DB_PASSWORD), +# SQLA_DB_HOST, +# SQLA_DB_NAME +# ) diff --git a/powerdnsadmin/lib/certutil.py b/powerdnsadmin/lib/certutil.py index 9e09cf63c..863e40b4f 100644 --- a/powerdnsadmin/lib/certutil.py +++ b/powerdnsadmin/lib/certutil.py @@ -1,48 +1,58 @@ -from OpenSSL import crypto -from datetime import datetime -import pytz +import datetime import os -CRYPT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../") + +from cryptography import x509 +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.x509.oid import NameOID + + +CRYPT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../") CERT_FILE = CRYPT_PATH + "/saml_cert.crt" KEY_FILE = CRYPT_PATH + "/saml_cert.key" -def check_certificate(): - if not os.path.isfile(CERT_FILE): - return False - st_cert = open(CERT_FILE, 'rt').read() - cert = crypto.load_certificate(crypto.FILETYPE_PEM, st_cert) - now = datetime.now(pytz.utc) - begin = datetime.strptime(cert.get_notBefore(), "%Y%m%d%H%M%SZ").replace(tzinfo=pytz.UTC) - begin_ok = begin < now - end = datetime.strptime(cert.get_notAfter(), "%Y%m%d%H%M%SZ").replace(tzinfo=pytz.UTC) - end_ok = end > now - if begin_ok and end_ok: - return True - return False - def create_self_signed_cert(): + """ Generate a new self-signed RSA-2048-SHA256 x509 certificate. 
""" + # Generate our key + key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + ) + + # Write our key to disk for safe keeping + with open(KEY_FILE, "wb") as key_file: + key_file.write(key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), + )) + + # Various details about who we are. For a self-signed certificate the + # subject and issuer are always the same. + subject = issuer = x509.Name([ + x509.NameAttribute(NameOID.COUNTRY_NAME, "DE"), + x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "NRW"), + x509.NameAttribute(NameOID.LOCALITY_NAME, "Dortmund"), + x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Dummy Company Ltd"), + x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "Dummy Company Ltd"), + x509.NameAttribute(NameOID.COMMON_NAME, "PowerDNS-Admin"), + ]) + + cert = x509.CertificateBuilder().subject_name( + subject + ).issuer_name( + issuer + ).public_key( + key.public_key() + ).serial_number( + x509.random_serial_number() + ).not_valid_before( + datetime.datetime.utcnow() + ).not_valid_after( + datetime.datetime.utcnow() + datetime.timedelta(days=10*365) + ).sign(key, hashes.SHA256()) - # create a key pair - k = crypto.PKey() - k.generate_key(crypto.TYPE_RSA, 2048) - - # create a self-signed cert - cert = crypto.X509() - cert.get_subject().C = "DE" - cert.get_subject().ST = "NRW" - cert.get_subject().L = "Dortmund" - cert.get_subject().O = "Dummy Company Ltd" - cert.get_subject().OU = "Dummy Company Ltd" - cert.get_subject().CN = "PowerDNS-Admin" - cert.set_serial_number(1000) - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(10*365*24*60*60) - cert.set_issuer(cert.get_subject()) - cert.set_pubkey(k) - cert.sign(k, 'sha256') - - open(CERT_FILE, "bw").write( - crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) - open(KEY_FILE, "bw").write( - crypto.dump_privatekey(crypto.FILETYPE_PEM, k)) \ No newline at end of 
file + # Write our certificate out to disk. + with open(CERT_FILE, "wb") as cert_file: + cert_file.write(cert.public_bytes(serialization.Encoding.PEM)) diff --git a/powerdnsadmin/lib/errors.py b/powerdnsadmin/lib/errors.py index 687f5543e..75887241c 100644 --- a/powerdnsadmin/lib/errors.py +++ b/powerdnsadmin/lib/errors.py @@ -21,7 +21,7 @@ def to_dict(self): class DomainNotExists(StructuredException): status_code = 404 - def __init__(self, name=None, message="Domain does not exist"): + def __init__(self, name=None, message="Zone does not exist"): StructuredException.__init__(self) self.message = message self.name = name @@ -30,7 +30,7 @@ def __init__(self, name=None, message="Domain does not exist"): class DomainAlreadyExists(StructuredException): status_code = 409 - def __init__(self, name=None, message="Domain already exists"): + def __init__(self, name=None, message="Zone already exists"): StructuredException.__init__(self) self.message = message self.name = name @@ -39,11 +39,18 @@ def __init__(self, name=None, message="Domain already exists"): class DomainAccessForbidden(StructuredException): status_code = 403 - def __init__(self, name=None, message="Domain access not allowed"): + def __init__(self, name=None, message="Zone access not allowed"): StructuredException.__init__(self) self.message = message self.name = name +class DomainOverrideForbidden(StructuredException): + status_code = 409 + + def __init__(self, name=None, message="Zone override of record not allowed"): + StructuredException.__init__(self) + self.message = message + self.name = name class ApiKeyCreateFail(StructuredException): status_code = 500 @@ -60,7 +67,7 @@ class ApiKeyNotUsable(StructuredException): def __init__( self, name=None, - message=("Api key must have domains or accounts" + message=("Api key must have zones or accounts" " or an administrative role")): StructuredException.__init__(self) self.message = message @@ -129,6 +136,13 @@ def __init__(self, name=None, message="Account 
does not exist"): self.message = message self.name = name +class InvalidAccountNameException(StructuredException): + status_code = 400 + + def __init__(self, name=None, message="The account name is invalid"): + StructuredException.__init__(self) + self.message = message + self.name = name class UserCreateFail(StructuredException): status_code = 500 @@ -138,7 +152,6 @@ def __init__(self, name=None, message="Creation of user failed"): self.message = message self.name = name - class UserCreateDuplicate(StructuredException): status_code = 409 @@ -163,7 +176,6 @@ def __init__(self, name=None, message="Update of user failed"): self.message = message self.name = name - class UserDeleteFail(StructuredException): status_code = 500 @@ -171,3 +183,19 @@ def __init__(self, name=None, message="Delete of user failed"): StructuredException.__init__(self) self.message = message self.name = name + +class RecordTypeNotAllowed(StructuredException): + status_code = 400 + + def __init__(self, name=None, message="Record type not allowed or does not present"): + StructuredException.__init__(self) + self.message = message + self.name = name + +class RecordTTLNotAllowed(StructuredException): + status_code = 400 + + def __init__(self, name=None, message="Record TTL not allowed or does not present"): + StructuredException.__init__(self) + self.message = message + self.name = name diff --git a/powerdnsadmin/lib/helper.py b/powerdnsadmin/lib/helper.py index a5925ef84..1b5a08264 100644 --- a/powerdnsadmin/lib/helper.py +++ b/powerdnsadmin/lib/helper.py @@ -14,9 +14,9 @@ def forward_request(): msg_str = "Sending request to powerdns API {0}" if request.method != 'GET' and request.method != 'DELETE': - msg = msg_str.format(request.get_json(force=True)) + msg = msg_str.format(request.get_json(force=True, silent=True)) current_app.logger.debug(msg) - data = request.get_json(force=True) + data = request.get_json(force=True, silent=True) verify = False diff --git a/powerdnsadmin/lib/settings.py 
b/powerdnsadmin/lib/settings.py new file mode 100644 index 000000000..b154112af --- /dev/null +++ b/powerdnsadmin/lib/settings.py @@ -0,0 +1,638 @@ +import os +from pathlib import Path + +basedir = os.path.abspath(Path(os.path.dirname(__file__)).parent) + +class AppSettings(object): + + defaults = { + # Flask Settings + 'bind_address': '0.0.0.0', + 'csrf_cookie_secure': False, + 'log_level': 'WARNING', + 'port': 9191, + 'salt': '$2b$12$yLUMTIfl21FKJQpTkRQXCu', + 'secret_key': 'e951e5a1f4b94151b360f47edf596dd2', + 'session_cookie_secure': False, + 'session_type': 'sqlalchemy', + 'sqlalchemy_track_modifications': True, + 'sqlalchemy_database_uri': 'sqlite:///' + os.path.join(basedir, 'pdns.db'), + 'sqlalchemy_engine_options': {}, + + # General Settings + 'captcha_enable': True, + 'captcha_height': 60, + 'captcha_length': 6, + 'captcha_session_key': 'captcha_image', + 'captcha_width': 160, + 'mail_server': 'localhost', + 'mail_port': 25, + 'mail_debug': False, + 'mail_use_ssl': False, + 'mail_use_tls': False, + 'mail_username': '', + 'mail_password': '', + 'mail_default_sender': '', + 'remote_user_enabled': False, + 'remote_user_cookies': [], + 'remote_user_logout_url': '', + 'hsts_enabled': False, + 'server_external_ssl': True, + 'maintenance': False, + 'fullscreen_layout': True, + 'record_helper': True, + 'login_ldap_first': True, + 'default_record_table_size': 15, + 'default_domain_table_size': 10, + 'auto_ptr': False, + 'record_quick_edit': True, + 'pretty_ipv6_ptr': False, + 'dnssec_admins_only': False, + 'allow_user_create_domain': False, + 'allow_user_remove_domain': False, + 'allow_user_view_history': False, + 'custom_history_header': '', + 'delete_sso_accounts': False, + 'bg_domain_updates': False, + 'enable_api_rr_history': True, + 'preserve_history': False, + 'site_name': 'PowerDNS-Admin', + 'site_url': 'http://localhost:9191', + 'session_timeout': 10, + 'warn_session_timeout': True, + 'pdns_api_url': '', + 'pdns_api_key': '', + 'pdns_api_timeout': 30, + 
'pdns_version': '4.1.1', + 'verify_ssl_connections': True, + 'verify_user_email': False, + 'enforce_api_ttl': False, + 'ttl_options': '1 minute,5 minutes,30 minutes,60 minutes,24 hours', + 'otp_field_enabled': True, + 'custom_css': '', + 'otp_force': False, + 'max_history_records': 1000, + 'deny_domain_override': False, + 'account_name_extra_chars': False, + 'gravatar_enabled': False, + 'pdns_admin_log_level': 'WARNING', + + # Local Authentication Settings + 'local_db_enabled': True, + 'signup_enabled': True, + 'pwd_enforce_characters': False, + 'pwd_min_len': 10, + 'pwd_min_lowercase': 3, + 'pwd_min_uppercase': 2, + 'pwd_min_digits': 2, + 'pwd_min_special': 1, + 'pwd_enforce_complexity': False, + 'pwd_min_complexity': 11, + + # LDAP Authentication Settings + 'ldap_enabled': False, + 'ldap_type': 'ldap', + 'ldap_uri': '', + 'ldap_base_dn': '', + 'ldap_admin_username': '', + 'ldap_admin_password': '', + 'ldap_domain': '', + 'ldap_filter_basic': '', + 'ldap_filter_username': '', + 'ldap_filter_group': '', + 'ldap_filter_groupname': '', + 'ldap_sg_enabled': False, + 'ldap_admin_group': '', + 'ldap_operator_group': '', + 'ldap_user_group': '', + 'autoprovisioning': False, + 'autoprovisioning_attribute': '', + 'urn_value': '', + 'purge': False, + + # Google OAuth Settings + 'google_oauth_enabled': False, + 'google_oauth_client_id': '', + 'google_oauth_client_secret': '', + 'google_oauth_scope': 'openid email profile', + 'google_base_url': 'https://www.googleapis.com/oauth2/v3/', + 'google_oauth_auto_configure': True, + 'google_oauth_metadata_url': 'https://accounts.google.com/.well-known/openid-configuration', + 'google_token_url': 'https://oauth2.googleapis.com/token', + 'google_authorize_url': 'https://accounts.google.com/o/oauth2/v2/auth', + + # GitHub OAuth Settings + 'github_oauth_enabled': False, + 'github_oauth_key': '', + 'github_oauth_secret': '', + 'github_oauth_scope': 'email', + 'github_oauth_api_url': 'https://api.github.com/user', + 
'github_oauth_auto_configure': False, + 'github_oauth_metadata_url': '', + 'github_oauth_token_url': 'https://github.com/login/oauth/access_token', + 'github_oauth_authorize_url': 'https://github.com/login/oauth/authorize', + + # Azure OAuth Settings + 'azure_oauth_enabled': False, + 'azure_oauth_key': '', + 'azure_oauth_secret': '', + 'azure_oauth_scope': 'User.Read openid email profile', + 'azure_oauth_api_url': 'https://graph.microsoft.com/v1.0/', + 'azure_oauth_auto_configure': True, + 'azure_oauth_metadata_url': '', + 'azure_oauth_token_url': '', + 'azure_oauth_authorize_url': '', + 'azure_sg_enabled': False, + 'azure_admin_group': '', + 'azure_operator_group': '', + 'azure_user_group': '', + 'azure_group_accounts_enabled': False, + 'azure_group_accounts_name': 'displayName', + 'azure_group_accounts_name_re': '', + 'azure_group_accounts_description': 'description', + 'azure_group_accounts_description_re': '', + + # OIDC OAuth Settings + 'oidc_oauth_enabled': False, + 'oidc_oauth_key': '', + 'oidc_oauth_secret': '', + 'oidc_oauth_scope': 'email', + 'oidc_oauth_api_url': '', + 'oidc_oauth_auto_configure': True, + 'oidc_oauth_metadata_url': '', + 'oidc_oauth_token_url': '', + 'oidc_oauth_authorize_url': '', + 'oidc_oauth_logout_url': '', + 'oidc_oauth_username': 'preferred_username', + 'oidc_oauth_email': 'email', + 'oidc_oauth_firstname': 'given_name', + 'oidc_oauth_last_name': 'family_name', + 'oidc_oauth_account_name_property': '', + 'oidc_oauth_account_description_property': '', + + # SAML Authentication Settings + 'saml_enabled': False, + 'saml_debug': False, + 'saml_path': os.path.join(basedir, 'saml'), + 'saml_metadata_url': None, + 'saml_metadata_cache_lifetime': 1, + 'saml_idp_sso_binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect', + 'saml_idp_entity_id': None, + 'saml_nameid_format': None, + 'saml_attribute_account': None, + 'saml_attribute_email': 'email', + 'saml_attribute_givenname': 'givenname', + 'saml_attribute_surname': 'surname', + 
'saml_attribute_name': None, + 'saml_attribute_username': None, + 'saml_attribute_admin': None, + 'saml_attribute_group': None, + 'saml_group_admin_name': None, + 'saml_group_operator_name': None, + 'saml_group_to_account_mapping': None, + 'saml_sp_entity_id': None, + 'saml_sp_contact_name': None, + 'saml_sp_contact_mail': None, + 'saml_sign_request': False, + 'saml_want_message_signed': True, + 'saml_logout': True, + 'saml_logout_url': None, + 'saml_assertion_encrypted': True, + 'saml_cert': None, + 'saml_key': None, + + # Zone Record Settings + 'forward_records_allow_edit': { + 'A': True, + 'AAAA': True, + 'AFSDB': False, + 'ALIAS': False, + 'CAA': True, + 'CERT': False, + 'CDNSKEY': False, + 'CDS': False, + 'CNAME': True, + 'DNSKEY': False, + 'DNAME': False, + 'DS': False, + 'HINFO': False, + 'KEY': False, + 'LOC': True, + 'LUA': False, + 'MX': True, + 'NAPTR': False, + 'NS': True, + 'NSEC': False, + 'NSEC3': False, + 'NSEC3PARAM': False, + 'OPENPGPKEY': False, + 'PTR': True, + 'RP': False, + 'RRSIG': False, + 'SOA': False, + 'SPF': True, + 'SSHFP': False, + 'SRV': True, + 'TKEY': False, + 'TSIG': False, + 'TLSA': False, + 'SMIMEA': False, + 'TXT': True, + 'URI': False + }, + 'reverse_records_allow_edit': { + 'A': False, + 'AAAA': False, + 'AFSDB': False, + 'ALIAS': False, + 'CAA': False, + 'CERT': False, + 'CDNSKEY': False, + 'CDS': False, + 'CNAME': False, + 'DNSKEY': False, + 'DNAME': False, + 'DS': False, + 'HINFO': False, + 'KEY': False, + 'LOC': True, + 'LUA': False, + 'MX': False, + 'NAPTR': False, + 'NS': True, + 'NSEC': False, + 'NSEC3': False, + 'NSEC3PARAM': False, + 'OPENPGPKEY': False, + 'PTR': True, + 'RP': False, + 'RRSIG': False, + 'SOA': False, + 'SPF': False, + 'SSHFP': False, + 'SRV': False, + 'TKEY': False, + 'TSIG': False, + 'TLSA': False, + 'SMIMEA': False, + 'TXT': True, + 'URI': False + }, + } + + types = { + # Flask Settings + 'bind_address': str, + 'csrf_cookie_secure': bool, + 'log_level': str, + 'port': int, + 'salt': str, + 
'secret_key': str, + 'session_cookie_secure': bool, + 'session_type': str, + 'sqlalchemy_track_modifications': bool, + 'sqlalchemy_database_uri': str, + 'sqlalchemy_engine_options': dict, + + # General Settings + 'captcha_enable': bool, + 'captcha_height': int, + 'captcha_length': int, + 'captcha_session_key': str, + 'captcha_width': int, + 'mail_server': str, + 'mail_port': int, + 'mail_debug': bool, + 'mail_use_ssl': bool, + 'mail_use_tls': bool, + 'mail_username': str, + 'mail_password': str, + 'mail_default_sender': str, + 'hsts_enabled': bool, + 'remote_user_enabled': bool, + 'remote_user_cookies': list, + 'remote_user_logout_url': str, + 'maintenance': bool, + 'fullscreen_layout': bool, + 'record_helper': bool, + 'login_ldap_first': bool, + 'default_record_table_size': int, + 'default_domain_table_size': int, + 'auto_ptr': bool, + 'record_quick_edit': bool, + 'pretty_ipv6_ptr': bool, + 'dnssec_admins_only': bool, + 'allow_user_create_domain': bool, + 'allow_user_remove_domain': bool, + 'allow_user_view_history': bool, + 'custom_history_header': str, + 'delete_sso_accounts': bool, + 'bg_domain_updates': bool, + 'enable_api_rr_history': bool, + 'preserve_history': bool, + 'site_name': str, + 'site_url': str, + 'session_timeout': int, + 'warn_session_timeout': bool, + 'pdns_api_url': str, + 'pdns_api_key': str, + 'pdns_api_timeout': int, + 'pdns_version': str, + 'verify_ssl_connections': bool, + 'verify_user_email': bool, + 'enforce_api_ttl': bool, + 'ttl_options': str, + 'otp_field_enabled': bool, + 'custom_css': str, + 'otp_force': bool, + 'max_history_records': int, + 'deny_domain_override': bool, + 'account_name_extra_chars': bool, + 'gravatar_enabled': bool, + 'pdns_admin_log_level': str, + 'forward_records_allow_edit': dict, + 'reverse_records_allow_edit': dict, + + # Local Authentication Settings + 'local_db_enabled': bool, + 'signup_enabled': bool, + 'pwd_enforce_characters': bool, + 'pwd_min_len': int, + 'pwd_min_lowercase': int, + 'pwd_min_uppercase': 
int, + 'pwd_min_digits': int, + 'pwd_min_special': int, + 'pwd_enforce_complexity': bool, + 'pwd_min_complexity': int, + + # LDAP Authentication Settings + 'ldap_enabled': bool, + 'ldap_type': str, + 'ldap_uri': str, + 'ldap_base_dn': str, + 'ldap_admin_username': str, + 'ldap_admin_password': str, + 'ldap_domain': str, + 'ldap_filter_basic': str, + 'ldap_filter_username': str, + 'ldap_filter_group': str, + 'ldap_filter_groupname': str, + 'ldap_sg_enabled': bool, + 'ldap_admin_group': str, + 'ldap_operator_group': str, + 'ldap_user_group': str, + 'autoprovisioning': bool, + 'autoprovisioning_attribute': str, + 'urn_value': str, + 'purge': bool, + + # Google OAuth Settings + 'google_oauth_enabled': bool, + 'google_oauth_client_id': str, + 'google_oauth_client_secret': str, + 'google_oauth_scope': str, + 'google_base_url': str, + 'google_oauth_auto_configure': bool, + 'google_oauth_metadata_url': str, + 'google_token_url': str, + 'google_authorize_url': str, + + # GitHub OAuth Settings + 'github_oauth_enabled': bool, + 'github_oauth_key': str, + 'github_oauth_secret': str, + 'github_oauth_scope': str, + 'github_oauth_api_url': str, + 'github_oauth_auto_configure': bool, + 'github_oauth_metadata_url': str, + 'github_oauth_token_url': str, + 'github_oauth_authorize_url': str, + + # Azure OAuth Settings + 'azure_oauth_enabled': bool, + 'azure_oauth_key': str, + 'azure_oauth_secret': str, + 'azure_oauth_scope': str, + 'azure_oauth_api_url': str, + 'azure_oauth_auto_configure': bool, + 'azure_oauth_metadata_url': str, + 'azure_oauth_token_url': str, + 'azure_oauth_authorize_url': str, + 'azure_sg_enabled': bool, + 'azure_admin_group': str, + 'azure_operator_group': str, + 'azure_user_group': str, + 'azure_group_accounts_enabled': bool, + 'azure_group_accounts_name': str, + 'azure_group_accounts_name_re': str, + 'azure_group_accounts_description': str, + 'azure_group_accounts_description_re': str, + + # OIDC OAuth Settings + 'oidc_oauth_enabled': bool, + 'oidc_oauth_key': 
str, + 'oidc_oauth_secret': str, + 'oidc_oauth_scope': str, + 'oidc_oauth_api_url': str, + 'oidc_oauth_auto_configure': bool, + 'oidc_oauth_metadata_url': str, + 'oidc_oauth_token_url': str, + 'oidc_oauth_authorize_url': str, + 'oidc_oauth_logout_url': str, + 'oidc_oauth_username': str, + 'oidc_oauth_email': str, + 'oidc_oauth_firstname': str, + 'oidc_oauth_last_name': str, + 'oidc_oauth_account_name_property': str, + 'oidc_oauth_account_description_property': str, + + # SAML Authentication Settings + 'saml_enabled': bool, + 'saml_debug': bool, + 'saml_path': str, + 'saml_metadata_url': str, + 'saml_metadata_cache_lifetime': int, + 'saml_idp_sso_binding': str, + 'saml_idp_entity_id': str, + 'saml_nameid_format': str, + 'saml_attribute_account': str, + 'saml_attribute_email': str, + 'saml_attribute_givenname': str, + 'saml_attribute_surname': str, + 'saml_attribute_name': str, + 'saml_attribute_username': str, + 'saml_attribute_admin': str, + 'saml_attribute_group': str, + 'saml_group_admin_name': str, + 'saml_group_operator_name': str, + 'saml_group_to_account_mapping': str, + 'saml_sp_entity_id': str, + 'saml_sp_contact_name': str, + 'saml_sp_contact_mail': str, + 'saml_sign_request': bool, + 'saml_want_message_signed': bool, + 'saml_logout': bool, + 'saml_logout_url': str, + 'saml_assertion_encrypted': bool, + 'saml_cert': str, + 'saml_key': str, + } + + groups = { + 'authentication': [ + # Local Authentication Settings + 'local_db_enabled', + 'signup_enabled', + 'pwd_enforce_characters', + 'pwd_min_len', + 'pwd_min_lowercase', + 'pwd_min_uppercase', + 'pwd_min_digits', + 'pwd_min_special', + 'pwd_enforce_complexity', + 'pwd_min_complexity', + + # LDAP Authentication Settings + 'ldap_enabled', + 'ldap_type', + 'ldap_uri', + 'ldap_base_dn', + 'ldap_admin_username', + 'ldap_admin_password', + 'ldap_domain', + 'ldap_filter_basic', + 'ldap_filter_username', + 'ldap_filter_group', + 'ldap_filter_groupname', + 'ldap_sg_enabled', + 'ldap_admin_group', + 
'ldap_operator_group', + 'ldap_user_group', + 'autoprovisioning', + 'autoprovisioning_attribute', + 'urn_value', + 'purge', + + # Google OAuth Settings + 'google_oauth_enabled', + 'google_oauth_client_id', + 'google_oauth_client_secret', + 'google_oauth_scope', + 'google_base_url', + 'google_oauth_auto_configure', + 'google_oauth_metadata_url', + 'google_token_url', + 'google_authorize_url', + + # GitHub OAuth Settings + 'github_oauth_enabled', + 'github_oauth_key', + 'github_oauth_secret', + 'github_oauth_scope', + 'github_oauth_api_url', + 'github_oauth_auto_configure', + 'github_oauth_metadata_url', + 'github_oauth_token_url', + 'github_oauth_authorize_url', + + # Azure OAuth Settings + 'azure_oauth_enabled', + 'azure_oauth_key', + 'azure_oauth_secret', + 'azure_oauth_scope', + 'azure_oauth_api_url', + 'azure_oauth_auto_configure', + 'azure_oauth_metadata_url', + 'azure_oauth_token_url', + 'azure_oauth_authorize_url', + 'azure_sg_enabled', + 'azure_admin_group', + 'azure_operator_group', + 'azure_user_group', + 'azure_group_accounts_enabled', + 'azure_group_accounts_name', + 'azure_group_accounts_name_re', + 'azure_group_accounts_description', + 'azure_group_accounts_description_re', + + # OIDC OAuth Settings + 'oidc_oauth_enabled', + 'oidc_oauth_key', + 'oidc_oauth_secret', + 'oidc_oauth_scope', + 'oidc_oauth_api_url', + 'oidc_oauth_auto_configure', + 'oidc_oauth_metadata_url', + 'oidc_oauth_token_url', + 'oidc_oauth_authorize_url', + 'oidc_oauth_logout_url', + 'oidc_oauth_username', + 'oidc_oauth_email', + 'oidc_oauth_firstname', + 'oidc_oauth_last_name', + 'oidc_oauth_account_name_property', + 'oidc_oauth_account_description_property', + ] + } + + @staticmethod + def convert_type(name, value): + import json + from json import JSONDecodeError + if name in AppSettings.types: + var_type = AppSettings.types[name] + + # Handle boolean values + if var_type == bool and isinstance(value, str): + if value.lower() in ['True', 'true', '1'] or value is True: + return 
True + else: + return False + + # Handle float values + if var_type == float: + return float(value) + + # Handle integer values + if var_type == int: + return int(value) + + if (var_type == dict or var_type == list) and isinstance(value, str) and len(value) > 0: + try: + return json.loads(value) + except JSONDecodeError as e: + # Provide backwards compatibility for legacy non-JSON format + value = value.replace("'", '"').replace('True', 'true').replace('False', 'false') + try: + return json.loads(value) + except JSONDecodeError as e: + raise ValueError('Cannot parse json {} for variable {}'.format(value, name)) + + if var_type == str: + return str(value) + + return value + + @staticmethod + def load_environment(app): + """ Load app settings from environment variables when defined. """ + import os + + for var_name, default_value in AppSettings.defaults.items(): + env_name = var_name.upper() + current_value = None + + if env_name + '_FILE' in os.environ: + if env_name in os.environ: + raise AttributeError( + "Both {} and {} are set but are exclusive.".format( + env_name, env_name + '_FILE')) + with open(os.environ[env_name + '_FILE']) as f: + current_value = f.read() + f.close() + + elif env_name in os.environ: + current_value = os.environ[env_name] + + if current_value is not None: + app.config[env_name] = AppSettings.convert_type(var_name, current_value) diff --git a/powerdnsadmin/lib/utils.py b/powerdnsadmin/lib/utils.py index 951f75009..f8cc997d3 100644 --- a/powerdnsadmin/lib/utils.py +++ b/powerdnsadmin/lib/utils.py @@ -2,13 +2,12 @@ import re import json import requests -import hashlib import ipaddress +import idna from collections.abc import Iterable from distutils.version import StrictVersion from urllib.parse import urlparse -from datetime import datetime, timedelta def auth_from_url(url): @@ -133,6 +132,16 @@ def display_master_name(data): return ", ".join(matches) +def format_zone_type(data): + """Formats the given zone type for modern social 
standards.""" + data = str(data).lower() + if data == 'master': + data = 'primary' + elif data == 'slave': + data = 'secondary' + return data.title() + + def display_time(amount, units='s', remove_seconds=True): """ Convert timestamp to normal time format @@ -185,17 +194,6 @@ def pdns_api_extended_uri(version): return "" -def email_to_gravatar_url(email="", size=100): - """ - AD doesn't necessarily have email - """ - if email is None: - email = "" - - hash_string = hashlib.md5(email.encode('utf-8')).hexdigest() - return "https://s.gravatar.com/avatar/{0}?s={1}".format(hash_string, size) - - def display_setting_state(value): if value == 1: return "ON" @@ -229,29 +227,49 @@ def ensure_list(l): yield from l -class customBoxes: - boxes = { - "reverse": (" ", " "), - "ip6arpa": ("ip6", "%.ip6.arpa"), - "inaddrarpa": ("in-addr", "%.in-addr.arpa") - } - order = ["reverse", "ip6arpa", "inaddrarpa"] +def pretty_domain_name(domain_name): + # Add a debugging statement to print out the domain name + print("Received zone name:", domain_name) -def pretty_domain_name(value): - """ - Display domain name in original format. - If it is IDN domain (Punycode starts with xn--), do the - idna decoding. 
- Note that any part of the domain name can be individually punycoded - """ - if isinstance(value, str): - if value.startswith('xn--') \ - or value.find('.xn--') != -1: + # Check if the domain name is encoded using Punycode + if domain_name.endswith('.xn--'): + try: + # Decode the domain name using the idna library + domain_name = idna.decode(domain_name) + except Exception as e: + # If the decoding fails, raise an exception with more information + raise Exception('Cannot decode IDN zone: {}'.format(e)) + + # Return the "pretty" version of the zone name + return domain_name + + +def to_idna(value, action): + splits = value.split('.') + result = [] + if action == 'encode': + for split in splits: try: - return value.encode().decode('idna') - except: - raise Exception("Cannot decode IDN domain") - else: - return value + # Try encoding to idna + if not split.startswith('_') and not split.startswith('-'): + result.append(idna.encode(split).decode()) + else: + result.append(split) + except idna.IDNAError: + result.append(split) + elif action == 'decode': + for split in splits: + if not split.startswith('_') and not split.startswith('--'): + result.append(idna.decode(split)) + else: + result.append(split) else: - raise Exception("Require the Punycode in string format") + raise Exception('No valid action received') + return '.'.join(result) + + +def format_datetime(value, format_str="%Y-%m-%d %I:%M %p"): + """Format a date time to (Default): YYYY-MM-DD HH:MM P""" + if value is None: + return "" + return value.strftime(format_str) diff --git a/powerdnsadmin/models/account.py b/powerdnsadmin/models/account.py index 4d08fc196..ab2341ec2 100644 --- a/powerdnsadmin/models/account.py +++ b/powerdnsadmin/models/account.py @@ -3,6 +3,7 @@ from urllib.parse import urljoin from ..lib import utils +from ..lib.errors import InvalidAccountNameException from .base import db from .setting import Setting from .user import User @@ -22,7 +23,7 @@ class Account(db.Model): 
back_populates="accounts") def __init__(self, name=None, description=None, contact=None, mail=None): - self.name = name + self.name = Account.sanitize_name(name) if name is not None else name self.description = description self.contact = contact self.mail = mail @@ -33,9 +34,30 @@ def __init__(self, name=None, description=None, contact=None, mail=None): self.PDNS_VERSION = Setting().get('pdns_version') self.API_EXTENDED_URL = utils.pdns_api_extended_uri(self.PDNS_VERSION) - if self.name is not None: - self.name = ''.join(c for c in self.name.lower() - if c in "abcdefghijklmnopqrstuvwxyz0123456789") + + @staticmethod + def sanitize_name(name): + """ + Formats the provided name to fit into the constraint + """ + if not isinstance(name, str): + raise InvalidAccountNameException("Account name must be a string") + + allowed_characters = "abcdefghijklmnopqrstuvwxyz0123456789" + + if Setting().get('account_name_extra_chars'): + allowed_characters += "_-." + + sanitized_name = ''.join(c for c in name.lower() if c in allowed_characters) + + if len(sanitized_name) > Account.name.type.length: + current_app.logger.error("Account name {0} too long. 
Truncated to: {1}".format( + sanitized_name, sanitized_name[:Account.name.type.length])) + + if not sanitized_name: + raise InvalidAccountNameException("Empty string is not a valid account name") + + return sanitized_name[:Account.name.type.length] def __repr__(self): return ''.format(self.name) @@ -68,11 +90,9 @@ def create_account(self): """ Create a new account """ - # Sanity check - account name - if self.name == "": - return {'status': False, 'msg': 'No account name specified'} + self.name = Account.sanitize_name(self.name) - # check that account name is not already used + # Check that account name is not already used account = Account.query.filter(Account.name == self.name).first() if account: return {'status': False, 'msg': 'Account already exists'} diff --git a/powerdnsadmin/models/api_key.py b/powerdnsadmin/models/api_key.py index 4c26cd223..7bd0fda68 100644 --- a/powerdnsadmin/models/api_key.py +++ b/powerdnsadmin/models/api_key.py @@ -60,31 +60,31 @@ def delete(self): def update(self, role_name=None, description=None, domains=None, accounts=None): try: - if role_name: - role = Role.query.filter(Role.name == role_name).first() - self.role_id = role.id - - if description: - self.description = description - - if domains is not None: - domain_object_list = Domain.query \ - .filter(Domain.name.in_(domains)) \ - .all() - self.domains[:] = domain_object_list - - if accounts is not None: - account_object_list = Account.query \ - .filter(Account.name.in_(accounts)) \ - .all() - self.accounts[:] = account_object_list - - db.session.commit() + if role_name: + role = Role.query.filter(Role.name == role_name).first() + self.role_id = role.id + + if description: + self.description = description + + if domains is not None: + domain_object_list = Domain.query \ + .filter(Domain.name.in_(domains)) \ + .all() + self.domains[:] = domain_object_list + + if accounts is not None: + account_object_list = Account.query \ + .filter(Account.name.in_(accounts)) \ + .all() + 
self.accounts[:] = account_object_list + + db.session.commit() except Exception as e: - msg_str = 'Update of apikey failed. Error: {0}' - current_app.logger.error(msg_str.format(e)) - db.session.rollback - raise e + msg_str = 'Update of apikey failed. Error: {0}' + current_app.logger.error(msg_str.format(e)) + db.session.rollback() # fixed line + raise e def get_hashed_password(self, plain_text_password=None): # Hash a password for the first time diff --git a/powerdnsadmin/models/domain.py b/powerdnsadmin/models/domain.py index cfc949dbe..f0b9a30bf 100644 --- a/powerdnsadmin/models/domain.py +++ b/powerdnsadmin/models/domain.py @@ -2,6 +2,7 @@ import re import traceback from flask import current_app +from flask_login import current_user from urllib.parse import urljoin from distutils.util import strtobool @@ -20,7 +21,7 @@ class Domain(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(255), index=True, unique=True) master = db.Column(db.String(128)) - type = db.Column(db.String(6), nullable=False) + type = db.Column(db.String(8), nullable=False) serial = db.Column(db.BigInteger) notified_serial = db.Column(db.BigInteger) last_check = db.Column(db.Integer) @@ -67,13 +68,13 @@ def add_setting(self, setting, value): return True except Exception as e: current_app.logger.error( - 'Can not create setting {0} for domain {1}. {2}'.format( + 'Can not create setting {0} for zone {1}. 
{2}'.format( setting, self.name, e)) return False def get_domain_info(self, domain_name): """ - Get all domains which has in PowerDNS + Get all zones which has in PowerDNS """ headers = {'X-API-Key': self.PDNS_API_KEY} jdata = utils.fetch_json(urljoin( @@ -87,7 +88,7 @@ def get_domain_info(self, domain_name): def get_domains(self): """ - Get all domains which has in PowerDNS + Get all zones which has in PowerDNS """ headers = {'X-API-Key': self.PDNS_API_KEY} jdata = utils.fetch_json( @@ -107,17 +108,33 @@ def get_id_by_name(self, name): return domain.id except Exception as e: current_app.logger.error( - 'Domain does not exist. ERROR: {0}'.format(e)) + 'Zone does not exist. ERROR: {0}'.format(e)) return None + def search_idn_domains(self, search_string): + """ + Search for IDN zones using the provided search string. + """ + # Compile the regular expression pattern for matching IDN zone names + idn_pattern = re.compile(r'^xn--') + + # Search for zone names that match the IDN pattern + idn_domains = [ + domain for domain in self.get_domains() if idn_pattern.match(domain) + ] + + # Filter the search results based on the provided search string + return [domain for domain in idn_domains if search_string in domain] + + def update(self): """ - Fetch zones (domains) from PowerDNS and update into DB + Fetch zones (zones) from PowerDNS and update into DB """ db_domain = Domain.query.all() list_db_domain = [d.name for d in db_domain] dict_db_domain = dict((x.name, x) for x in db_domain) - current_app.logger.info("Found {} domains in PowerDNS-Admin".format( + current_app.logger.info("Found {} zones in PowerDNS-Admin".format( len(list_db_domain))) headers = {'X-API-Key': self.PDNS_API_KEY} try: @@ -132,20 +149,31 @@ def update(self): "Found {} zones in PowerDNS server".format(len(list_jdomain))) try: - # domains should remove from db since it doesn't exist in powerdns anymore + # zones should remove from db since it doesn't exist in powerdns anymore should_removed_db_domain = 
list( set(list_db_domain).difference(list_jdomain)) for domain_name in should_removed_db_domain: self.delete_domain_from_pdnsadmin(domain_name, do_commit=False) except Exception as e: current_app.logger.error( - 'Can not delete domain from DB. DETAIL: {0}'.format(e)) + 'Can not delete zone from DB. DETAIL: {0}'.format(e)) current_app.logger.debug(traceback.format_exc()) - # update/add new domain + # update/add new zone + account_cache = {} for data in jdata: if 'account' in data: - account_id = Account().get_id_by_name(data['account']) + # if no account is set don't try to query db + if data['account'] == '': + find_account_id = None + else: + find_account_id = account_cache.get(data['account']) + # if account was not queried in the past and hence not in cache + if find_account_id is None: + find_account_id = Account().get_id_by_name(data['account']) + # add to cache + account_cache[data['account']] = find_account_id + account_id = find_account_id else: current_app.logger.debug( "No 'account' data found in API result - Unsupported PowerDNS version?" @@ -159,16 +187,16 @@ def update(self): self.add_domain_to_powerdns_admin(domain=data, do_commit=False) db.session.commit() - current_app.logger.info('Update domain finished') + current_app.logger.info('Update zone finished') return { 'status': 'ok', - 'msg': 'Domain table has been updated successfully' + 'msg': 'Zone table has been updated successfully' } except Exception as e: db.session.rollback() current_app.logger.error( - 'Cannot update domain table. Error: {0}'.format(e)) - return {'status': 'error', 'msg': 'Cannot update domain table'} + 'Cannot update zone table. 
Error: {0}'.format(e)) + return {'status': 'error', 'msg': 'Cannot update zone table'} def update_pdns_admin_domain(self, domain, account_id, data, do_commit=True): # existing domain, only update if something actually has changed @@ -190,11 +218,11 @@ def update_pdns_admin_domain(self, domain, account_id, data, do_commit=True): try: if do_commit: db.session.commit() - current_app.logger.info("Updated PDNS-Admin domain {0}".format( + current_app.logger.info("Updated PDNS-Admin zone {0}".format( domain.name)) except Exception as e: db.session.rollback() - current_app.logger.info("Rolled back Domain {0} {1}".format( + current_app.logger.info("Rolled back zone {0} {1}".format( domain.name, e)) raise @@ -206,7 +234,7 @@ def add(self, domain_master_ips=[], account_name=None): """ - Add a domain to power dns + Add a zone to power dns """ headers = {'X-API-Key': self.PDNS_API_KEY, 'Content-Type': 'application/json'} @@ -241,23 +269,23 @@ def add(self, if 'error' in jdata.keys(): current_app.logger.error(jdata['error']) if jdata.get('http_code') == 409: - return {'status': 'error', 'msg': 'Domain already exists'} + return {'status': 'error', 'msg': 'Zone already exists'} return {'status': 'error', 'msg': jdata['error']} else: current_app.logger.info( - 'Added domain successfully to PowerDNS: {0}'.format( + 'Added zone successfully to PowerDNS: {0}'.format( domain_name)) self.add_domain_to_powerdns_admin(domain_dict=post_data) - return {'status': 'ok', 'msg': 'Added domain successfully'} + return {'status': 'ok', 'msg': 'Added zone successfully'} except Exception as e: - current_app.logger.error('Cannot add domain {0} {1}'.format( + current_app.logger.error('Cannot add zone {0} {1}'.format( domain_name, e)) current_app.logger.debug(traceback.format_exc()) - return {'status': 'error', 'msg': 'Cannot add this domain.'} + return {'status': 'error', 'msg': 'Cannot add this zone.'} def add_domain_to_powerdns_admin(self, domain=None, domain_dict=None, do_commit=True): """ - Read 
Domain from PowerDNS and add into PDNS-Admin + Read zone from PowerDNS and add into PDNS-Admin """ headers = {'X-API-Key': self.PDNS_API_KEY} if not domain: @@ -271,7 +299,7 @@ def add_domain_to_powerdns_admin(self, domain=None, domain_dict=None, do_commit= timeout=int(Setting().get('pdns_api_timeout')), verify=Setting().get('verify_ssl_connections')) except Exception as e: - current_app.logger.error('Can not read domain from PDNS') + current_app.logger.error('Can not read zone from PDNS') current_app.logger.error(e) current_app.logger.debug(traceback.format_exc()) @@ -297,20 +325,20 @@ def add_domain_to_powerdns_admin(self, domain=None, domain_dict=None, do_commit= if do_commit: db.session.commit() current_app.logger.info( - "Synced PowerDNS Domain to PDNS-Admin: {0}".format(d.name)) + "Synced PowerDNS zone to PDNS-Admin: {0}".format(d.name)) return { 'status': 'ok', - 'msg': 'Added Domain successfully to PowerDNS-Admin' + 'msg': 'Added zone successfully to PowerDNS-Admin' } except Exception as e: db.session.rollback() - current_app.logger.info("Rolled back Domain {0}".format(d.name)) + current_app.logger.info("Rolled back zone {0}".format(d.name)) raise def update_soa_setting(self, domain_name, soa_edit_api): domain = Domain.query.filter(Domain.name == domain_name).first() if not domain: - return {'status': 'error', 'msg': 'Domain does not exist.'} + return {'status': 'error', 'msg': 'Zone does not exist.'} headers = {'X-API-Key': self.PDNS_API_KEY, 'Content-Type': 'application/json'} @@ -337,7 +365,7 @@ def update_soa_setting(self, domain_name, soa_edit_api): return {'status': 'error', 'msg': jdata['error']} else: current_app.logger.info( - 'soa-edit-api changed for domain {0} successfully'.format( + 'soa-edit-api changed for zone {0} successfully'.format( domain_name)) return { 'status': 'ok', @@ -347,11 +375,11 @@ def update_soa_setting(self, domain_name, soa_edit_api): current_app.logger.debug(e) current_app.logger.debug(traceback.format_exc()) 
current_app.logger.error( - 'Cannot change soa-edit-api for domain {0}'.format( + 'Cannot change soa-edit-api for zone {0}'.format( domain_name)) return { 'status': 'error', - 'msg': 'Cannot change soa-edit-api for this domain.' + 'msg': 'Cannot change soa-edit-api for this zone.' } def update_kind(self, domain_name, kind, masters=[]): @@ -360,7 +388,7 @@ def update_kind(self, domain_name, kind, masters=[]): """ domain = Domain.query.filter(Domain.name == domain_name).first() if not domain: - return {'status': 'error', 'msg': 'Domain does not exist.'} + return {'status': 'error', 'msg': 'Znoe does not exist.'} headers = {'X-API-Key': self.PDNS_API_KEY, 'Content-Type': 'application/json'} @@ -381,26 +409,26 @@ def update_kind(self, domain_name, kind, masters=[]): return {'status': 'error', 'msg': jdata['error']} else: current_app.logger.info( - 'Update domain kind for {0} successfully'.format( + 'Update zone kind for {0} successfully'.format( domain_name)) return { 'status': 'ok', - 'msg': 'Domain kind changed successfully' + 'msg': 'Zone kind changed successfully' } except Exception as e: current_app.logger.error( - 'Cannot update kind for domain {0}. Error: {1}'.format( + 'Cannot update kind for zone {0}. Error: {1}'.format( domain_name, e)) current_app.logger.debug(traceback.format_exc()) return { 'status': 'error', - 'msg': 'Cannot update kind for this domain.' + 'msg': 'Cannot update kind for this zone.' 
} def create_reverse_domain(self, domain_name, domain_reverse_name): """ - Check the existing reverse lookup domain, + Check the existing reverse lookup zone, if not exists create a new one automatically """ domain_obj = Domain.query.filter(Domain.name == domain_name).first() @@ -420,7 +448,7 @@ def create_reverse_domain(self, domain_name, domain_reverse_name): result = self.add(domain_reverse_name, 'Master', 'DEFAULT', [], []) self.update() if result['status'] == 'ok': - history = History(msg='Add reverse lookup domain {0}'.format( + history = History(msg='Add reverse lookup zone {0}'.format( domain_reverse_name), detail=json.dumps({ 'domain_type': 'Master', @@ -431,7 +459,7 @@ def create_reverse_domain(self, domain_name, domain_reverse_name): else: return { 'status': 'error', - 'msg': 'Adding reverse lookup domain failed' + 'msg': 'Adding reverse lookup zone failed' } domain_user_ids = self.get_user() if len(domain_user_ids) > 0: @@ -441,13 +469,13 @@ def create_reverse_domain(self, domain_name, domain_reverse_name): 'status': 'ok', 'msg': - 'New reverse lookup domain created with granted privileges' + 'New reverse lookup zone created with granted privileges' } return { 'status': 'ok', - 'msg': 'New reverse lookup domain created without users' + 'msg': 'New reverse lookup zone created without users' } - return {'status': 'ok', 'msg': 'Reverse lookup domain already exists'} + return {'status': 'ok', 'msg': 'Reverse lookup zone already exists'} def get_reverse_domain_name(self, reverse_host_address): c = 1 @@ -476,22 +504,22 @@ def get_reverse_domain_name(self, reverse_host_address): def delete(self, domain_name): """ - Delete a single domain name from powerdns + Delete a single zone name from powerdns """ try: self.delete_domain_from_powerdns(domain_name) self.delete_domain_from_pdnsadmin(domain_name) - return {'status': 'ok', 'msg': 'Delete domain successfully'} + return {'status': 'ok', 'msg': 'Delete zone successfully'} except Exception as e: 
current_app.logger.error( - 'Cannot delete domain {0}'.format(domain_name)) + 'Cannot delete zone {0}'.format(domain_name)) current_app.logger.error(e) current_app.logger.debug(traceback.format_exc()) - return {'status': 'error', 'msg': 'Cannot delete domain'} + return {'status': 'error', 'msg': 'Cannot delete zone'} def delete_domain_from_powerdns(self, domain_name): """ - Delete a single domain name from powerdns + Delete a single zone name from powerdns """ headers = {'X-API-Key': self.PDNS_API_KEY} @@ -503,12 +531,12 @@ def delete_domain_from_powerdns(self, domain_name): method='DELETE', verify=Setting().get('verify_ssl_connections')) current_app.logger.info( - 'Deleted domain successfully from PowerDNS: {0}'.format( + 'Deleted zone successfully from PowerDNS: {0}'.format( domain_name)) - return {'status': 'ok', 'msg': 'Delete domain successfully'} + return {'status': 'ok', 'msg': 'Delete zone successfully'} def delete_domain_from_pdnsadmin(self, domain_name, do_commit=True): - # Revoke permission before deleting domain + # Revoke permission before deleting zone domain = Domain.query.filter(Domain.name == domain_name).first() domain_user = DomainUser.query.filter( DomainUser.domain_id == domain.id) @@ -520,24 +548,25 @@ def delete_domain_from_pdnsadmin(self, domain_name, do_commit=True): domain_setting.delete() domain.apikeys[:] = [] - # Remove history for domain - domain_history = History.query.filter( - History.domain_id == domain.id - ) - if domain_history: - domain_history.delete() + # Remove history for zone + if not Setting().get('preserve_history'): + domain_history = History.query.filter( + History.domain_id == domain.id + ) + if domain_history: + domain_history.delete() - # then remove domain + # then remove zone Domain.query.filter(Domain.name == domain_name).delete() if do_commit: db.session.commit() current_app.logger.info( - "Deleted domain successfully from pdnsADMIN: {}".format( + "Deleted zone successfully from pdnsADMIN: {}".format( 
domain_name)) def get_user(self): """ - Get users (id) who have access to this domain name + Get users (id) who have access to this zone name """ user_ids = [] query = db.session.query( @@ -567,7 +596,7 @@ def grant_privileges(self, new_user_ids): except Exception as e: db.session.rollback() current_app.logger.error( - 'Cannot revoke user privileges on domain {0}. DETAIL: {1}'. + 'Cannot revoke user privileges on zone {0}. DETAIL: {1}'. format(self.name, e)) current_app.logger.debug(print(traceback.format_exc())) @@ -579,7 +608,7 @@ def grant_privileges(self, new_user_ids): except Exception as e: db.session.rollback() current_app.logger.error( - 'Cannot grant user privileges to domain {0}. DETAIL: {1}'. + 'Cannot grant user privileges to zone {0}. DETAIL: {1}'. format(self.name, e)) current_app.logger.debug(print(traceback.format_exc())) @@ -596,7 +625,7 @@ def revoke_privileges_by_id(self, user_id): def add_user(self, user): """ - Add a single user to Domain by User + Add a single user to zone by User """ try: du = DomainUser(self.id, user.id) @@ -606,7 +635,7 @@ def add_user(self, user): except Exception as e: db.session.rollback() current_app.logger.error( - 'Cannot add user privileges on domain {0}. DETAIL: {1}'. + 'Cannot add user privileges on zone {0}. DETAIL: {1}'. 
format(self.name, e)) return False @@ -614,6 +643,8 @@ def update_from_master(self, domain_name): """ Update records from Master DNS server """ + import urllib.parse + domain = Domain.query.filter(Domain.name == domain_name).first() if domain: headers = {'X-API-Key': self.PDNS_API_KEY} @@ -621,7 +652,7 @@ def update_from_master(self, domain_name): r = utils.fetch_json(urljoin( self.PDNS_STATS_URL, self.API_EXTENDED_URL + '/servers/localhost/zones/{0}/axfr-retrieve'.format( - domain.name)), + urllib.parse.quote_plus(domain.name))), headers=headers, timeout=int( Setting().get('pdns_api_timeout')), @@ -638,12 +669,14 @@ def update_from_master(self, domain_name): 'There was something wrong, please contact administrator' } else: - return {'status': 'error', 'msg': 'This domain does not exist'} + return {'status': 'error', 'msg': 'This zone does not exist'} def get_domain_dnssec(self, domain_name): """ - Get domain DNSSEC information + Get zone DNSSEC information """ + import urllib.parse + domain = Domain.query.filter(Domain.name == domain_name).first() if domain: headers = {'X-API-Key': self.PDNS_API_KEY} @@ -652,7 +685,7 @@ def get_domain_dnssec(self, domain_name): urljoin( self.PDNS_STATS_URL, self.API_EXTENDED_URL + '/servers/localhost/zones/{0}/cryptokeys'.format( - domain.name)), + urllib.parse.quote_plus(domain.name))), headers=headers, timeout=int(Setting().get('pdns_api_timeout')), method='GET', @@ -660,13 +693,13 @@ def get_domain_dnssec(self, domain_name): if 'error' in jdata: return { 'status': 'error', - 'msg': 'DNSSEC is not enabled for this domain' + 'msg': 'DNSSEC is not enabled for this zone' } else: return {'status': 'ok', 'dnssec': jdata} except Exception as e: current_app.logger.error( - 'Cannot get domain dnssec. DETAIL: {0}'.format(e)) + 'Cannot get zone dnssec. 
DETAIL: {0}'.format(e)) return { 'status': 'error', @@ -674,12 +707,14 @@ def get_domain_dnssec(self, domain_name): 'There was something wrong, please contact administrator' } else: - return {'status': 'error', 'msg': 'This domain does not exist'} + return {'status': 'error', 'msg': 'This zone does not exist'} def enable_domain_dnssec(self, domain_name): """ - Enable domain DNSSEC + Enable zone DNSSEC """ + import urllib.parse + domain = Domain.query.filter(Domain.name == domain_name).first() if domain: headers = {'X-API-Key': self.PDNS_API_KEY, 'Content-Type': 'application/json'} @@ -689,7 +724,9 @@ def enable_domain_dnssec(self, domain_name): jdata = utils.fetch_json( urljoin( self.PDNS_STATS_URL, self.API_EXTENDED_URL + - '/servers/localhost/zones/{0}'.format(domain.name)), + '/servers/localhost/zones/{0}'.format( + urllib.parse.quote_plus(domain.name) + )), headers=headers, timeout=int(Setting().get('pdns_api_timeout')), method='PUT', @@ -699,7 +736,7 @@ def enable_domain_dnssec(self, domain_name): return { 'status': 'error', 'msg': - 'API-RECTIFY could not be enabled for this domain', + 'API-RECTIFY could not be enabled for this zone', 'jdata': jdata } @@ -709,7 +746,8 @@ def enable_domain_dnssec(self, domain_name): urljoin( self.PDNS_STATS_URL, self.API_EXTENDED_URL + '/servers/localhost/zones/{0}/cryptokeys'.format( - domain.name)), + urllib.parse.quote_plus(domain.name) + )), headers=headers, timeout=int(Setting().get('pdns_api_timeout')), method='POST', @@ -720,7 +758,7 @@ def enable_domain_dnssec(self, domain_name): 'status': 'error', 'msg': - 'Cannot enable DNSSEC for this domain. Error: {0}'. + 'Cannot enable DNSSEC for this zone. Error: {0}'. 
format(jdata['error']), 'jdata': jdata @@ -740,12 +778,14 @@ def enable_domain_dnssec(self, domain_name): } else: - return {'status': 'error', 'msg': 'This domain does not exist'} + return {'status': 'error', 'msg': 'This zone does not exist'} def delete_dnssec_key(self, domain_name, key_id): """ Remove keys DNSSEC """ + import urllib.parse + domain = Domain.query.filter(Domain.name == domain_name).first() if domain: headers = {'X-API-Key': self.PDNS_API_KEY, 'Content-Type': 'application/json'} @@ -755,7 +795,7 @@ def delete_dnssec_key(self, domain_name, key_id): urljoin( self.PDNS_STATS_URL, self.API_EXTENDED_URL + '/servers/localhost/zones/{0}/cryptokeys/{1}'.format( - domain.name, key_id)), + urllib.parse.quote_plus(domain.name), key_id)), headers=headers, timeout=int(Setting().get('pdns_api_timeout')), method='DELETE', @@ -765,13 +805,13 @@ def delete_dnssec_key(self, domain_name, key_id): 'status': 'error', 'msg': - 'Cannot disable DNSSEC for this domain. Error: {0}'. + 'Cannot disable DNSSEC for this zone. Error: {0}'. 
format(jdata['error']), 'jdata': jdata } - # Disable API-RECTIFY for domain, AFTER deactivating DNSSEC + # Disable API-RECTIFY for zone, AFTER deactivating DNSSEC post_data = {"api_rectify": False} jdata = utils.fetch_json( urljoin( @@ -786,7 +826,7 @@ def delete_dnssec_key(self, domain_name, key_id): return { 'status': 'error', 'msg': - 'API-RECTIFY could not be disabled for this domain', + 'API-RECTIFY could not be disabled for this zone', 'jdata': jdata } @@ -805,25 +845,26 @@ def delete_dnssec_key(self, domain_name, key_id): } else: - return {'status': 'error', 'msg': 'This domain does not exist'} + return {'status': 'error', 'msg': 'This zone does not exist'} def assoc_account(self, account_id, update=True): """ - Associate domain with a domain, specified by account id + Associate account with a zone, specified by account id """ domain_name = self.name # Sanity check - domain name if domain_name == "": - return {'status': False, 'msg': 'No domain name specified'} + return {'status': False, 'msg': 'No zone name specified'} # read domain and check that it exists domain = Domain.query.filter(Domain.name == domain_name).first() if not domain: - return {'status': False, 'msg': 'Domain does not exist'} + return {'status': False, 'msg': 'Zone does not exist'} headers = {'X-API-Key': self.PDNS_API_KEY, 'Content-Type': 'application/json'} + account_name_old = Account().get_name_by_id(domain.account_id) account_name = Account().get_name_by_id(account_id) post_data = {"account": account_name} @@ -845,23 +886,30 @@ def assoc_account(self, account_id, update=True): else: if update: self.update() - msg_str = 'Account changed for domain {0} successfully' + msg_str = 'Account changed for zone {0} successfully' current_app.logger.info(msg_str.format(domain_name)) + history = History(msg='Update zone {0} associate account {1}'.format(domain.name, 'none' if account_name == '' else account_name), + detail = json.dumps({ + 'assoc_account': 'None' if account_name == '' else 
account_name, + 'dissoc_account': 'None' if account_name_old == '' else account_name_old + }), + created_by=current_user.username) + history.add() return {'status': 'ok', 'msg': 'account changed successfully'} except Exception as e: current_app.logger.debug(e) current_app.logger.debug(traceback.format_exc()) - msg_str = 'Cannot change account for domain {0}' + msg_str = 'Cannot change account for zone {0}' current_app.logger.error(msg_str.format(domain_name)) return { 'status': 'error', - 'msg': 'Cannot change account for this domain.' + 'msg': 'Cannot change account for this zone.' } def get_account(self): """ - Get current account associated with this domain + Get current account associated with this zone """ domain = Domain.query.filter(Domain.name == self.name).first() @@ -870,7 +918,7 @@ def get_account(self): def is_valid_access(self, user_id): """ Check if the user is allowed to access this - domain name + zone name """ return db.session.query(Domain) \ .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ @@ -881,3 +929,18 @@ def is_valid_access(self, user_id): DomainUser.user_id == user_id, AccountUser.user_id == user_id )).filter(Domain.id == self.id).first() + + # Return None if this zone does not exist as record, + # Return the parent zone that hold the record if exist + def is_overriding(self, domain_name): + upper_domain_name = '.'.join(domain_name.split('.')[1:]) + while upper_domain_name != '': + if self.get_id_by_name(upper_domain_name.rstrip('.')) != None: + upper_domain = self.get_domain_info(upper_domain_name) + if 'rrsets' in upper_domain: + for r in upper_domain['rrsets']: + if domain_name.rstrip('.') in r['name'].rstrip('.'): + current_app.logger.error('Zone already exists as a record: {} under zone: {}'.format(r['name'].rstrip('.'), upper_domain_name)) + return upper_domain_name + upper_domain_name = '.'.join(upper_domain_name.split('.')[1:]) + return None diff --git a/powerdnsadmin/models/domain_template.py 
b/powerdnsadmin/models/domain_template.py index 1b3c6ffc7..70463ac49 100644 --- a/powerdnsadmin/models/domain_template.py +++ b/powerdnsadmin/models/domain_template.py @@ -45,11 +45,11 @@ def create(self): return {'status': 'ok', 'msg': 'Template has been created'} except Exception as e: current_app.logger.error( - 'Can not update domain template table. Error: {0}'.format(e)) + 'Can not update zone template table. Error: {0}'.format(e)) db.session.rollback() return { 'status': 'error', - 'msg': 'Can not update domain template table' + 'msg': 'Can not update zone template table' } def delete_template(self): @@ -60,6 +60,6 @@ def delete_template(self): return {'status': 'ok', 'msg': 'Template has been deleted'} except Exception as e: current_app.logger.error( - 'Can not delete domain template. Error: {0}'.format(e)) + 'Can not delete zone template. Error: {0}'.format(e)) db.session.rollback() - return {'status': 'error', 'msg': 'Can not delete domain template'} \ No newline at end of file + return {'status': 'error', 'msg': 'Can not delete zone template'} diff --git a/powerdnsadmin/models/domain_template_record.py b/powerdnsadmin/models/domain_template_record.py index bd86c6efb..465b07d4d 100644 --- a/powerdnsadmin/models/domain_template_record.py +++ b/powerdnsadmin/models/domain_template_record.py @@ -39,9 +39,9 @@ def apply(self): db.session.commit() except Exception as e: current_app.logger.error( - 'Can not update domain template table. Error: {0}'.format(e)) + 'Can not update zone template table. 
Error: {0}'.format(e)) db.session.rollback() return { 'status': 'error', - 'msg': 'Can not update domain template table' + 'msg': 'Can not update zone template table' } diff --git a/powerdnsadmin/models/record.py b/powerdnsadmin/models/record.py index 9929b67b4..a43b057c3 100644 --- a/powerdnsadmin/models/record.py +++ b/powerdnsadmin/models/record.py @@ -46,7 +46,7 @@ def __init__(self, def get_rrsets(self, domain): """ - Query domain's rrsets via PDNS API + Query zone's rrsets via PDNS API """ headers = {'X-API-Key': self.PDNS_API_KEY} try: @@ -59,7 +59,7 @@ def get_rrsets(self, domain): verify=Setting().get('verify_ssl_connections')) except Exception as e: current_app.logger.error( - "Cannot fetch domain's record data from remote powerdns api. DETAIL: {0}" + "Cannot fetch zone's record data from remote powerdns api. DETAIL: {0}" .format(e)) return [] @@ -77,7 +77,7 @@ def get_rrsets(self, domain): def add(self, domain_name, rrset): """ - Add a record to a domain (Used by auto_ptr and DynDNS) + Add a record to a zone (Used by auto_ptr and DynDNS) Args: domain_name(str): The zone name @@ -115,7 +115,7 @@ def add(self, domain_name, rrset): return {'status': 'ok', 'msg': 'Record was added successfully'} except Exception as e: current_app.logger.error( - "Cannot add record to domain {}. Error: {}".format( + "Cannot add record to zone {}. 
Error: {}".format( domain_name, e)) current_app.logger.debug("Submitted record rrset: \n{}".format( utils.pretty_json(rrset))) @@ -169,12 +169,12 @@ def build_rrsets(self, domain_name, submitted_records): record['record_data'] = record['record_data'].replace('[ZONE]', domain_name) # Translate record name into punycode (IDN) as that's the only way # to convey non-ascii records to the dns server - record['record_name'] = record['record_name'].encode('idna').decode() + record['record_name'] = utils.to_idna(record["record_name"], "encode") #TODO: error handling # If the record is an alias (CNAME), we will also make sure that - # the target domain is properly converted to punycode (IDN) - if record["record_type"] == 'CNAME': - record['record_data'] = record['record_data'].encode('idna').decode() + # the target zone is properly converted to punycode (IDN) + if record['record_type'] == 'CNAME' or record['record_type'] == 'SOA': + record['record_data'] = utils.to_idna(record['record_data'], 'encode') #TODO: error handling # If it is ipv6 reverse zone and PRETTY_IPV6_PTR is enabled, # We convert ipv6 address back to reverse record format @@ -251,6 +251,7 @@ def compare(self, domain_name, submitted_records): Returns: new_rrsets(list): List of rrsets to be added del_rrsets(list): List of rrsets to be deleted + zone_has_comments(bool): True if the zone currently contains persistent comments """ # Create submitted rrsets from submitted records submitted_rrsets = self.build_rrsets(domain_name, submitted_records) @@ -266,9 +267,11 @@ def compare(self, domain_name, submitted_records): # PDNS API always return the comments with modified_at # info, we have to remove it to be able to do the dict # comparison between current and submitted rrsets + zone_has_comments = False for r in current_rrsets: for comment in r['comments']: if 'modified_at' in comment: + zone_has_comments = True del comment['modified_at'] # List of rrsets to be added @@ -290,7 +293,7 @@ def compare(self, 
domain_name, submitted_records): current_app.logger.debug("new_rrsets: \n{}".format(utils.pretty_json(new_rrsets))) current_app.logger.debug("del_rrsets: \n{}".format(utils.pretty_json(del_rrsets))) - return new_rrsets, del_rrsets + return new_rrsets, del_rrsets, zone_has_comments def apply_rrsets(self, domain_name, rrsets): headers = {'X-API-Key': self.PDNS_API_KEY, 'Content-Type': 'application/json'} @@ -303,87 +306,85 @@ def apply_rrsets(self, domain_name, rrsets): data=rrsets) return jdata + @staticmethod + def to_api_payload(new_rrsets, del_rrsets, comments_supported): + """Turn the given changes into a single api payload.""" + + def replace_for_api(rrset): + """Return a modified copy of the given RRset with changetype REPLACE.""" + if not rrset or rrset.get('changetype', None) != 'REPLACE': + return rrset + replace_copy = dict(rrset) + has_nonempty_comments = any(bool(c.get('content', None)) for c in replace_copy.get('comments', [])) + if not has_nonempty_comments: + if comments_supported: + replace_copy['comments'] = [] + else: + # For backends that don't support comments: Remove the attribute altogether + replace_copy.pop('comments', None) + return replace_copy + + def rrset_in(needle, haystack): + """Return whether the given RRset (identified by name and type) is in the list.""" + for hay in haystack: + if needle['name'] == hay['name'] and needle['type'] == hay['type']: + return True + return False + + def delete_for_api(rrset): + """Return a minified copy of the given RRset with changetype DELETE.""" + if not rrset or rrset.get('changetype', None) != 'DELETE': + return rrset + delete_copy = dict(rrset) + delete_copy.pop('ttl', None) + delete_copy.pop('records', None) + delete_copy.pop('comments', None) + return delete_copy + + replaces = [replace_for_api(r) for r in new_rrsets] + deletes = [delete_for_api(r) for r in del_rrsets if not rrset_in(r, replaces)] + return { + # order matters: first deletions, then additions+changes + 'rrsets': deletes + 
replaces + } + def apply(self, domain_name, submitted_records): """ - Apply record changes to a domain. This function - will make 2 calls to the PDNS API to DELETE and + Apply record changes to a zone. This function + will make 1 call to the PDNS API to DELETE and REPLACE records (rrsets) """ current_app.logger.debug( "submitted_records: {}".format(submitted_records)) # Get the list of rrsets to be added and deleted - new_rrsets, del_rrsets = self.compare(domain_name, submitted_records) - - # Remove blank comments from rrsets for compatibility with some backends - def remove_blank_comments(rrset): - if not rrset['comments']: - del rrset['comments'] - elif isinstance(rrset['comments'], list): - # Merge all non-blank comment values into a list - merged_comments = [ - v - for c in rrset['comments'] - for v in c.values() - if v - ] - # Delete comment if all values are blank (len(merged_comments) == 0) - if not merged_comments: - del rrset['comments'] + new_rrsets, del_rrsets, zone_has_comments = self.compare(domain_name, submitted_records) - for r in new_rrsets['rrsets']: - remove_blank_comments(r) - - for r in del_rrsets['rrsets']: - remove_blank_comments(r) + # The history logic still needs *all* the deletes with full data to display a useful diff. + # So create a "minified" copy for the api call, and return the original data back up + api_payload = self.to_api_payload(new_rrsets['rrsets'], del_rrsets['rrsets'], zone_has_comments) + current_app.logger.debug(f"api payload: \n{utils.pretty_json(api_payload)}") # Submit the changes to PDNS API try: - if del_rrsets["rrsets"]: - result = self.apply_rrsets(domain_name, del_rrsets) + if api_payload["rrsets"]: + result = self.apply_rrsets(domain_name, api_payload) if 'error' in result.keys(): current_app.logger.error( - 'Cannot apply record changes with deleting rrsets step. PDNS error: {}' + 'Cannot apply record changes. 
PDNS error: {}' .format(result['error'])) return { 'status': 'error', 'msg': result['error'].replace("'", "") } - if new_rrsets["rrsets"]: - result = self.apply_rrsets(domain_name, new_rrsets) - if 'error' in result.keys(): - current_app.logger.error( - 'Cannot apply record changes with adding rrsets step. PDNS error: {}' - .format(result['error'])) - - # rollback - re-add the removed record if the adding operation is failed. - if del_rrsets["rrsets"]: - rollback_rrests = del_rrsets - for r in del_rrsets["rrsets"]: - r['changetype'] = 'REPLACE' - rollback = self.apply_rrsets(domain_name, rollback_rrests) - if 'error' in rollback.keys(): - return dict(status='error', - msg='Failed to apply changes. Cannot rollback previous failed operation: {}' - .format(rollback['error'].replace("'", ""))) - else: - return dict(status='error', - msg='Failed to apply changes. Rolled back previous failed operation: {}' - .format(result['error'].replace("'", ""))) - else: - return { - 'status': 'error', - 'msg': result['error'].replace("'", "") - } - self.auto_ptr(domain_name, new_rrsets, del_rrsets) self.update_db_serial(domain_name) current_app.logger.info('Record was applied successfully.') return {'status': 'ok', 'msg': 'Record was applied successfully', 'data': (new_rrsets, del_rrsets)} except Exception as e: current_app.logger.error( - "Cannot apply record changes to domain {0}. Error: {1}".format( + "Cannot apply record changes to zone {0}. Error: {1}".format( domain_name, e)) current_app.logger.debug(traceback.format_exc()) return { @@ -428,6 +429,25 @@ def auto_ptr(self, domain_name, new_rrsets, del_rrsets): ] d = Domain() + for r in del_rrsets: + for record in r['records']: + # Format the reverse record name + # It is the reverse of forward record's content. 
+ reverse_host_address = dns.reversename.from_address( + record['content']).to_text() + + # Create the reverse domain name in PDNS + domain_reverse_name = d.get_reverse_domain_name( + reverse_host_address) + d.create_reverse_domain(domain_name, + domain_reverse_name) + + # Delete the reverse zone + self.name = reverse_host_address + self.type = 'PTR' + self.data = record['content'] + self.delete(domain_reverse_name) + for r in new_rrsets: for record in r['records']: # Format the reverse record name @@ -461,32 +481,13 @@ def auto_ptr(self, domain_name, new_rrsets, del_rrsets): # Format the rrset rrset = {"rrsets": rrset_data} self.add(domain_reverse_name, rrset) - - for r in del_rrsets: - for record in r['records']: - # Format the reverse record name - # It is the reverse of forward record's content. - reverse_host_address = dns.reversename.from_address( - record['content']).to_text() - - # Create the reverse domain name in PDNS - domain_reverse_name = d.get_reverse_domain_name( - reverse_host_address) - d.create_reverse_domain(domain_name, - domain_reverse_name) - - # Delete the reverse zone - self.name = reverse_host_address - self.type = 'PTR' - self.data = record['content'] - self.delete(domain_reverse_name) return { 'status': 'ok', 'msg': 'Auto-PTR record was updated successfully' } except Exception as e: current_app.logger.error( - "Cannot update auto-ptr record changes to domain {0}. Error: {1}" + "Cannot update auto-ptr record changes to zone {0}. 
Error: {1}" .format(domain_name, e)) current_app.logger.debug(traceback.format_exc()) return { @@ -498,7 +499,7 @@ def auto_ptr(self, domain_name, new_rrsets, del_rrsets): def delete(self, domain): """ - Delete a record from domain + Delete a record from zone """ headers = {'X-API-Key': self.PDNS_API_KEY, 'Content-Type': 'application/json'} data = { @@ -523,7 +524,7 @@ def delete(self, domain): return {'status': 'ok', 'msg': 'Record was removed successfully'} except Exception as e: current_app.logger.error( - "Cannot remove record {0}/{1}/{2} from domain {3}. DETAIL: {4}" + "Cannot remove record {0}/{1}/{2} from zone {3}. DETAIL: {4}" .format(self.name, self.type, self.data, domain, e)) return { 'status': 'error', @@ -546,7 +547,7 @@ def is_allowed_delete(self): def exists(self, domain): """ - Check if record is present within domain records, and if it's present set self to found record + Check if record is present within zone records, and if it's present set self to found record """ rrsets = self.get_rrsets(domain) for r in rrsets: @@ -594,7 +595,7 @@ def update(self, domain, content): return {'status': 'ok', 'msg': 'Record was updated successfully'} except Exception as e: current_app.logger.error( - "Cannot add record {0}/{1}/{2} to domain {3}. DETAIL: {4}". + "Cannot add record {0}/{1}/{2} to zone {3}. DETAIL: {4}". 
format(self.name, self.type, self.data, domain, e)) return { 'status': 'error', @@ -620,11 +621,11 @@ def update_db_serial(self, domain): db.session.commit() return { 'status': True, - 'msg': 'Synced local serial for domain name {0}'.format(domain) + 'msg': 'Synced local serial for zone name {0}'.format(domain) } else: return { 'status': False, 'msg': - 'Could not find domain name {0} in local db'.format(domain) + 'Could not find zone name {0} in local db'.format(domain) } diff --git a/powerdnsadmin/models/role.py b/powerdnsadmin/models/role.py index a5cf53026..5440f3dbf 100644 --- a/powerdnsadmin/models/role.py +++ b/powerdnsadmin/models/role.py @@ -5,7 +5,7 @@ class Role(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64), index=True, unique=True) description = db.Column(db.String(128)) - users = db.relationship('User', backref='role', lazy=True) + users = db.relationship('User', back_populates='role', lazy=True) apikeys = db.relationship('ApiKey', back_populates='role', lazy=True) def __init__(self, id=None, name=None, description=None): @@ -20,4 +20,4 @@ def __init__(self, name=None, description=None): self.description = description def __repr__(self): - return ''.format(self.name) + return ''.format(self.name) diff --git a/powerdnsadmin/models/sessions.py b/powerdnsadmin/models/sessions.py new file mode 100644 index 000000000..b699a3dff --- /dev/null +++ b/powerdnsadmin/models/sessions.py @@ -0,0 +1,39 @@ +from flask import current_app, session +from flask_login import current_user +from .base import db + + +class Sessions(db.Model): + id = db.Column(db.Integer, primary_key=True) + session_id = db.Column(db.String(255), index=True, unique=True) + data = db.Column(db.BLOB) + expiry = db.Column(db.DateTime) + + def __init__(self, + id=None, + session_id=None, + data=None, + expiry=None): + self.id = id + self.session_id = session_id + self.data = data + self.expiry = expiry + + def __repr__(self): + return ''.format(self.id) + 
+ @staticmethod + def clean_up_expired_sessions(): + """Clean up expired sessions in the database""" + from datetime import datetime + from sqlalchemy import or_ + from sqlalchemy.exc import SQLAlchemyError + + try: + db.session.query(Sessions).filter(or_(Sessions.expiry < datetime.now(), Sessions.expiry is None)).delete() + db.session.commit() + except SQLAlchemyError as e: + db.session.rollback() + current_app.logger.error(e) + return False + return True diff --git a/powerdnsadmin/models/setting.py b/powerdnsadmin/models/setting.py index 33b8db540..2bcd8c509 100644 --- a/powerdnsadmin/models/setting.py +++ b/powerdnsadmin/models/setting.py @@ -1,197 +1,19 @@ import sys import traceback - import pytimeparse from ast import literal_eval -from distutils.util import strtobool from flask import current_app - from .base import db +from powerdnsadmin.lib.settings import AppSettings class Setting(db.Model): id = db.Column(db.Integer, primary_key=True) - name = db.Column(db.String(64)) + name = db.Column(db.String(64), unique=True, index=True) value = db.Column(db.Text()) - defaults = { - 'maintenance': False, - 'fullscreen_layout': True, - 'record_helper': True, - 'login_ldap_first': True, - 'default_record_table_size': 15, - 'default_domain_table_size': 10, - 'auto_ptr': False, - 'record_quick_edit': True, - 'pretty_ipv6_ptr': False, - 'dnssec_admins_only': False, - 'allow_user_create_domain': False, - 'allow_user_remove_domain': False, - 'allow_user_view_history': False, - 'delete_sso_accounts': False, - 'bg_domain_updates': False, - 'enable_api_rr_history': True, - 'site_name': 'PowerDNS-Admin', - 'site_url': 'http://localhost:9191', - 'session_timeout': 10, - 'warn_session_timeout': True, - 'pdns_api_url': '', - 'pdns_api_key': '', - 'pdns_api_timeout': 30, - 'pdns_version': '4.1.1', - 'verify_ssl_connections': True, - 'local_db_enabled': True, - 'signup_enabled': True, - 'autoprovisioning': False, - 'urn_value':'', - 'autoprovisioning_attribute': '', - 'purge': 
False, - 'verify_user_email': False, - 'ldap_enabled': False, - 'ldap_type': 'ldap', - 'ldap_uri': '', - 'ldap_base_dn': '', - 'ldap_admin_username': '', - 'ldap_admin_password': '', - 'ldap_filter_basic': '', - 'ldap_filter_group': '', - 'ldap_filter_username': '', - 'ldap_filter_groupname': '', - 'ldap_sg_enabled': False, - 'ldap_admin_group': '', - 'ldap_operator_group': '', - 'ldap_user_group': '', - 'ldap_domain': '', - 'github_oauth_enabled': False, - 'github_oauth_key': '', - 'github_oauth_secret': '', - 'github_oauth_scope': 'email', - 'github_oauth_api_url': 'https://api.github.com/user', - 'github_oauth_token_url': - 'https://github.com/login/oauth/access_token', - 'github_oauth_authorize_url': - 'https://github.com/login/oauth/authorize', - 'google_oauth_enabled': False, - 'google_oauth_client_id': '', - 'google_oauth_client_secret': '', - 'google_token_url': 'https://oauth2.googleapis.com/token', - 'google_oauth_scope': 'openid email profile', - 'google_authorize_url': 'https://accounts.google.com/o/oauth2/v2/auth', - 'google_base_url': 'https://www.googleapis.com/oauth2/v3/', - 'azure_oauth_enabled': False, - 'azure_oauth_key': '', - 'azure_oauth_secret': '', - 'azure_oauth_scope': 'User.Read openid email profile', - 'azure_oauth_api_url': 'https://graph.microsoft.com/v1.0/', - 'azure_oauth_token_url': - 'https://login.microsoftonline.com/[tenancy]/oauth2/v2.0/token', - 'azure_oauth_authorize_url': - 'https://login.microsoftonline.com/[tenancy]/oauth2/v2.0/authorize', - 'azure_sg_enabled': False, - 'azure_admin_group': '', - 'azure_operator_group': '', - 'azure_user_group': '', - 'azure_group_accounts_enabled': False, - 'azure_group_accounts_name': 'displayName', - 'azure_group_accounts_name_re': '', - 'azure_group_accounts_description': 'description', - 'azure_group_accounts_description_re': '', - 'oidc_oauth_enabled': False, - 'oidc_oauth_key': '', - 'oidc_oauth_secret': '', - 'oidc_oauth_scope': 'email', - 'oidc_oauth_api_url': '', - 
'oidc_oauth_token_url': '', - 'oidc_oauth_authorize_url': '', - 'oidc_oauth_logout_url': '', - 'oidc_oauth_username': 'preferred_username', - 'oidc_oauth_firstname': 'given_name', - 'oidc_oauth_last_name': 'family_name', - 'oidc_oauth_email': 'email', - 'oidc_oauth_account_name_property': '', - 'oidc_oauth_account_description_property': '', - 'forward_records_allow_edit': { - 'A': True, - 'AAAA': True, - 'AFSDB': False, - 'ALIAS': False, - 'CAA': True, - 'CERT': False, - 'CDNSKEY': False, - 'CDS': False, - 'CNAME': True, - 'DNSKEY': False, - 'DNAME': False, - 'DS': False, - 'HINFO': False, - 'KEY': False, - 'LOC': True, - 'LUA': False, - 'MX': True, - 'NAPTR': False, - 'NS': True, - 'NSEC': False, - 'NSEC3': False, - 'NSEC3PARAM': False, - 'OPENPGPKEY': False, - 'PTR': True, - 'RP': False, - 'RRSIG': False, - 'SOA': False, - 'SPF': True, - 'SSHFP': False, - 'SRV': True, - 'TKEY': False, - 'TSIG': False, - 'TLSA': False, - 'SMIMEA': False, - 'TXT': True, - 'URI': False - }, - 'reverse_records_allow_edit': { - 'A': False, - 'AAAA': False, - 'AFSDB': False, - 'ALIAS': False, - 'CAA': False, - 'CERT': False, - 'CDNSKEY': False, - 'CDS': False, - 'CNAME': False, - 'DNSKEY': False, - 'DNAME': False, - 'DS': False, - 'HINFO': False, - 'KEY': False, - 'LOC': True, - 'LUA': False, - 'MX': False, - 'NAPTR': False, - 'NS': True, - 'NSEC': False, - 'NSEC3': False, - 'NSEC3PARAM': False, - 'OPENPGPKEY': False, - 'PTR': True, - 'RP': False, - 'RRSIG': False, - 'SOA': False, - 'SPF': False, - 'SSHFP': False, - 'SRV': False, - 'TKEY': False, - 'TSIG': False, - 'TLSA': False, - 'SMIMEA': False, - 'TXT': True, - 'URI': False - }, - 'ttl_options': '1 minute,5 minutes,30 minutes,60 minutes,24 hours', - 'otp_field_enabled': True, - 'custom_css': '', - 'otp_force': False, - 'max_history_records': 1000 - } + ZONE_TYPE_FORWARD = 'forward' + ZONE_TYPE_REVERSE = 'reverse' def __init__(self, id=None, name=None, value=None): self.id = id @@ -209,7 +31,7 @@ def set_maintenance(self, mode): 
Setting.name == 'maintenance').first() if maintenance is None: - value = self.defaults['maintenance'] + value = AppSettings.defaults['maintenance'] maintenance = Setting(name='maintenance', value=str(value)) db.session.add(maintenance) @@ -231,7 +53,7 @@ def toggle(self, setting): current_setting = Setting.query.filter(Setting.name == setting).first() if current_setting is None: - value = self.defaults[setting] + value = AppSettings.defaults[setting] current_setting = Setting(name=setting, value=str(value)) db.session.add(current_setting) @@ -250,70 +72,79 @@ def toggle(self, setting): return False def set(self, setting, value): + import json current_setting = Setting.query.filter(Setting.name == setting).first() if current_setting is None: current_setting = Setting(name=setting, value=None) db.session.add(current_setting) - value = str(value) + value = AppSettings.convert_type(setting, value) + + if isinstance(value, dict) or isinstance(value, list): + value = json.dumps(value) try: current_setting.value = value db.session.commit() return True except Exception as e: - current_app.logger.error('Cannot edit setting {0}. DETAIL: {1}'.format( - setting, e)) + current_app.logger.error('Cannot edit setting {0}. 
DETAIL: {1}'.format(setting, e)) current_app.logger.debug(traceback.format_exec()) db.session.rollback() return False def get(self, setting): - if setting in self.defaults: - + if setting in AppSettings.defaults: + if setting.upper() in current_app.config: result = current_app.config[setting.upper()] else: result = self.query.filter(Setting.name == setting).first() - + if result is not None: - if hasattr(result,'value'): - result = result.value - return strtobool(result) if result in [ - 'True', 'False' - ] else result + if hasattr(result, 'value'): + result = result.value + + return AppSettings.convert_type(setting, result) else: - return self.defaults[setting] + return AppSettings.defaults[setting] else: current_app.logger.error('Unknown setting queried: {0}'.format(setting)) - + + def get_group(self, group): + if not isinstance(group, list): + group = AppSettings.groups[group] + + result = {} + + for var_name, default_value in AppSettings.defaults.items(): + if var_name in group: + result[var_name] = self.get(var_name) + + return result + def get_records_allow_to_edit(self): return list( - set(self.get_forward_records_allow_to_edit() + - self.get_reverse_records_allow_to_edit())) - - def get_forward_records_allow_to_edit(self): - records = self.get('forward_records_allow_edit') - f_records = literal_eval(records) if isinstance(records, - str) else records - r_name = [r for r in f_records if f_records[r]] - # Sort alphabetically if python version is smaller than 3.6 - if sys.version_info[0] < 3 or (sys.version_info[0] == 3 - and sys.version_info[1] < 6): - r_name.sort() - return r_name - - def get_reverse_records_allow_to_edit(self): - records = self.get('reverse_records_allow_edit') - r_records = literal_eval(records) if isinstance(records, - str) else records - r_name = [r for r in r_records if r_records[r]] + set(self.get_supported_record_types(self.ZONE_TYPE_FORWARD) + + self.get_supported_record_types(self.ZONE_TYPE_REVERSE))) + + def 
get_supported_record_types(self, zone_type): + setting_value = [] + + if zone_type == self.ZONE_TYPE_FORWARD: + setting_value = self.get('forward_records_allow_edit') + elif zone_type == self.ZONE_TYPE_REVERSE: + setting_value = self.get('reverse_records_allow_edit') + + records = literal_eval(setting_value) if isinstance(setting_value, str) else setting_value + types = [r for r in records if records[r]] + # Sort alphabetically if python version is smaller than 3.6 - if sys.version_info[0] < 3 or (sys.version_info[0] == 3 - and sys.version_info[1] < 6): - r_name.sort() - return r_name + if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 6): + types.sort() + + return types def get_ttl_options(self): return [(pytimeparse.parse(ttl), ttl) diff --git a/powerdnsadmin/models/user.py b/powerdnsadmin/models/user.py index 1802492a6..42f894fe1 100644 --- a/powerdnsadmin/models/user.py +++ b/powerdnsadmin/models/user.py @@ -5,6 +5,7 @@ import pyotp import ldap import ldap.filter +from collections import OrderedDict from flask import current_app from flask_login import AnonymousUserMixin from sqlalchemy import orm @@ -34,6 +35,7 @@ class User(db.Model): otp_secret = db.Column(db.String(16)) confirmed = db.Column(db.SmallInteger, nullable=False, default=0) role_id = db.Column(db.Integer, db.ForeignKey('role.id')) + role = db.relationship('Role', back_populates="users", lazy=True) accounts = None def __init__(self, @@ -83,21 +85,18 @@ def is_anonymous(self): return False def get_id(self): - try: - return unicode(self.id) # python 2 - except NameError: - return str(self.id) # python 3 + return str(self.id) def __repr__(self): return ''.format(self.username) def get_totp_uri(self): - return "otpauth://totp/PowerDNS-Admin:{0}?secret={1}&issuer=PowerDNS-Admin".format( - self.username, self.otp_secret) + return "otpauth://totp/{0}:{1}?secret={2}&issuer=PowerDNS-Admin".format( + Setting().get('site_name'), self.username, self.otp_secret) def 
verify_totp(self, token): totp = pyotp.TOTP(self.otp_secret) - return totp.verify(token) + return totp.verify(token, valid_window = 5) def get_hashed_password(self, plain_text_password=None): # Hash a password for the first time @@ -110,9 +109,10 @@ def get_hashed_password(self, plain_text_password=None): def check_password(self, hashed_password): # Check hashed password. Using bcrypt, the salt is saved into the hash itself - if (self.plain_text_password): - return bcrypt.checkpw(self.plain_text_password.encode('utf-8'), - hashed_password.encode('utf-8')) + if hasattr(self, "plain_text_password"): + if self.plain_text_password != None: + return bcrypt.checkpw(self.plain_text_password.encode('utf-8'), + hashed_password.encode('utf-8')) return False def get_user_info_by_id(self): @@ -128,12 +128,21 @@ def ldap_init_conn(self): conn = ldap.initialize(Setting().get('ldap_uri')) conn.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF) conn.set_option(ldap.OPT_PROTOCOL_VERSION, 3) - conn.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND) conn.set_option(ldap.OPT_X_TLS_DEMAND, True) conn.set_option(ldap.OPT_DEBUG_LEVEL, 255) conn.protocol_version = ldap.VERSION3 return conn + def escape_filter_chars(self, filter_str): + """ + Escape chars for ldap search + """ + escape_chars = ['\\', '*', '(', ')', '\x00'] + replace_chars = ['\\5c', '\\2a', '\\28', '\\29', '\\00'] + for escape_char in escape_chars: + filter_str = filter_str.replace(escape_char, replace_chars[escape_chars.index(escape_char)]) + return filter_str + def ldap_search(self, searchFilter, baseDN, retrieveAttributes=None): searchScope = ldap.SCOPE_SUBTREE @@ -256,82 +265,82 @@ def is_validate(self, method, src_ip='', trust_user=False): if LDAP_TYPE == 'ldap': groupSearchFilter = "(&({0}={1}){2})".format(LDAP_FILTER_GROUPNAME, ldap_username, LDAP_FILTER_GROUP) current_app.logger.debug('Ldap groupSearchFilter {0}'.format(groupSearchFilter)) - if (self.ldap_search(groupSearchFilter, - LDAP_ADMIN_GROUP)): + if 
(LDAP_ADMIN_GROUP and self.ldap_search(groupSearchFilter, LDAP_ADMIN_GROUP)): role_name = 'Administrator' current_app.logger.info( 'User {0} is part of the "{1}" group that allows admin access to PowerDNS-Admin' - .format(self.username, - LDAP_ADMIN_GROUP)) - elif (self.ldap_search(groupSearchFilter, - LDAP_OPERATOR_GROUP)): + .format(self.username, LDAP_ADMIN_GROUP)) + elif (LDAP_OPERATOR_GROUP and self.ldap_search(groupSearchFilter, LDAP_OPERATOR_GROUP)): role_name = 'Operator' current_app.logger.info( 'User {0} is part of the "{1}" group that allows operator access to PowerDNS-Admin' - .format(self.username, - LDAP_OPERATOR_GROUP)) - elif (self.ldap_search(groupSearchFilter, - LDAP_USER_GROUP)): + .format(self.username, LDAP_OPERATOR_GROUP)) + elif (LDAP_USER_GROUP and self.ldap_search(groupSearchFilter, LDAP_USER_GROUP)): current_app.logger.info( 'User {0} is part of the "{1}" group that allows user access to PowerDNS-Admin' - .format(self.username, - LDAP_USER_GROUP)) + .format(self.username, LDAP_USER_GROUP)) else: current_app.logger.error( - 'User {0} is not part of the "{1}", "{2}" or "{3}" groups that allow access to PowerDNS-Admin' - .format(self.username, - LDAP_ADMIN_GROUP, - LDAP_OPERATOR_GROUP, - LDAP_USER_GROUP)) + 'User {0} is not part of any security groups that allow access to PowerDNS-Admin' + .format(self.username)) return False elif LDAP_TYPE == 'ad': - ldap_admin_group_filter, ldap_operator_group, ldap_user_group = "", "", "" - if LDAP_ADMIN_GROUP: - ldap_admin_group_filter = "(memberOf:1.2.840.113556.1.4.1941:={0})".format(LDAP_ADMIN_GROUP) - if LDAP_OPERATOR_GROUP: - ldap_operator_group = "(memberOf:1.2.840.113556.1.4.1941:={0})".format(LDAP_OPERATOR_GROUP) - if LDAP_USER_GROUP: - ldap_user_group = "(memberOf:1.2.840.113556.1.4.1941:={0})".format(LDAP_USER_GROUP) - searchFilter = "(&({0}={1})(|{2}{3}{4}))".format(LDAP_FILTER_USERNAME, self.username, - LDAP_FILTER_GROUP, ldap_admin_group_filter, - ldap_operator_group, ldap_user_group) - 
ldap_result = self.ldap_search(searchFilter, LDAP_BASE_DN) - user_ad_member_of = ldap_result[0][0][1].get( - 'memberOf') - - if not user_ad_member_of: + ldap_group_security_roles = OrderedDict( + Administrator=LDAP_ADMIN_GROUP, + Operator=LDAP_OPERATOR_GROUP, + User=LDAP_USER_GROUP, + ) + user_dn = self.escape_filter_chars(ldap_result[0][0][0]) + sf_groups = "" + + for group in ldap_group_security_roles.values(): + if not group: + continue + + sf_groups += f"(distinguishedName={group})" + + sf_member_user = f"(member:1.2.840.113556.1.4.1941:={user_dn})" + search_filter = f"(&(|{sf_groups}){sf_member_user})" + current_app.logger.debug(f"LDAP groupSearchFilter '{search_filter}'") + + ldap_user_groups = [ + group[0][0] + for group in self.ldap_search( + search_filter, + LDAP_BASE_DN + ) + ] + + if not ldap_user_groups: current_app.logger.error( - 'User {0} does not belong to any group while LDAP_GROUP_SECURITY_ENABLED is ON' - .format(self.username)) + f"User '{self.username}' " + "does not belong to any group " + "while LDAP_GROUP_SECURITY_ENABLED is ON" + ) return False - user_ad_member_of = [g.decode("utf-8") for g in user_ad_member_of] + current_app.logger.debug( + "LDAP User security groups " + f"for user '{self.username}': " + " ".join(ldap_user_groups) + ) - if (LDAP_ADMIN_GROUP in user_ad_member_of): - role_name = 'Administrator' - current_app.logger.info( - 'User {0} is part of the "{1}" group that allows admin access to PowerDNS-Admin' - .format(self.username, - LDAP_ADMIN_GROUP)) - elif (LDAP_OPERATOR_GROUP in user_ad_member_of): - role_name = 'Operator' - current_app.logger.info( - 'User {0} is part of the "{1}" group that allows operator access to PowerDNS-Admin' - .format(self.username, - LDAP_OPERATOR_GROUP)) - elif (LDAP_USER_GROUP in user_ad_member_of): + for role, ldap_group in ldap_group_security_roles.items(): + # Continue when groups is not defined or + # user is'nt member of LDAP group + if not ldap_group or not ldap_group in ldap_user_groups: + 
continue + + role_name = role current_app.logger.info( - 'User {0} is part of the "{1}" group that allows user access to PowerDNS-Admin' - .format(self.username, - LDAP_USER_GROUP)) - else: - current_app.logger.error( - 'User {0} is not part of the "{1}", "{2}" or "{3}" groups that allow access to PowerDNS-Admin' - .format(self.username, - LDAP_ADMIN_GROUP, - LDAP_OPERATOR_GROUP, - LDAP_USER_GROUP)) - return False + f"User '{self.username}' member of " + f"the '{ldap_group}' group that allows " + f"'{role}' access to to PowerDNS-Admin" + ) + + # Stop loop on first found + break + else: current_app.logger.error('Invalid LDAP type') return False @@ -409,12 +418,12 @@ def create_local_user(self): Create local user witch stores username / password in the DB """ # check if username existed - user = User.query.filter(User.username == self.username).first() + user = User.query.filter(str(User.username).lower() == self.username.lower()).first() if user: return {'status': False, 'msg': 'Username is already in use'} # check if email existed - user = User.query.filter(User.email == self.email).first() + user = User.query.filter(str(User.email).lower() == self.email.lower()).first() if user: return {'status': False, 'msg': 'Email address is already in use'} @@ -425,8 +434,12 @@ def create_local_user(self): self.role_id = Role.query.filter_by( name='Administrator').first().id - self.password = self.get_hashed_password( - self.plain_text_password) if self.plain_text_password else '*' + if hasattr(self, "plain_text_password"): + if self.plain_text_password != None: + self.password = self.get_hashed_password( + self.plain_text_password) + else: + self.password = '*' if self.password and self.password != '*': self.password = self.password.decode("utf-8") @@ -462,9 +475,10 @@ def update_local_user(self): user.email = self.email # store new password hash (only if changed) - if self.plain_text_password: - user.password = self.get_hashed_password( - 
self.plain_text_password).decode("utf-8") + if hasattr(self, "plain_text_password"): + if self.plain_text_password != None: + user.password = self.get_hashed_password( + self.plain_text_password).decode("utf-8") db.session.commit() return {'status': True, 'msg': 'User updated successfully'} @@ -479,9 +493,11 @@ def update_profile(self, enable_otp=None): user.firstname = self.firstname if self.firstname else user.firstname user.lastname = self.lastname if self.lastname else user.lastname - user.password = self.get_hashed_password( - self.plain_text_password).decode( - "utf-8") if self.plain_text_password else user.password + + if hasattr(self, "plain_text_password"): + if self.plain_text_password != None: + user.password = self.get_hashed_password( + self.plain_text_password).decode("utf-8") if self.email: # Can not update to a new email that @@ -522,7 +538,7 @@ def update_confirmed(self, confirmed): def get_domains(self): """ - Get list of domains which the user is granted to have + Get list of zones which the user is granted to have access. 
Note: This doesn't include the permission granting from Account @@ -675,7 +691,7 @@ def updateUser(self, Entitlements): def addMissingDomain(self, autoprovision_domain, current_domains): """ - Add domain gathered by autoprovisioning to the current domains list of a user + Add domain gathered by autoprovisioning to the current zones list of a user """ from ..models.domain import Domain user = db.session.query(User).filter(User.username == self.username).first() diff --git a/powerdnsadmin/routes/__init__.py b/powerdnsadmin/routes/__init__.py index 829f11051..598b17a7b 100644 --- a/powerdnsadmin/routes/__init__.py +++ b/powerdnsadmin/routes/__init__.py @@ -1,15 +1,20 @@ -from .base import login_manager, handle_bad_request, handle_unauthorized_access, handle_access_forbidden, handle_page_not_found, handle_internal_server_error +from .base import ( + captcha, csrf, login_manager, handle_bad_request, handle_unauthorized_access, + handle_access_forbidden, handle_page_not_found, handle_internal_server_error +) from .index import index_bp from .user import user_bp from .dashboard import dashboard_bp from .domain import domain_bp from .admin import admin_bp -from .api import api_bp +from .api import api_bp, apilist_bp def init_app(app): login_manager.init_app(app) + csrf.init_app(app) + captcha.init_app(app) app.register_blueprint(index_bp) app.register_blueprint(user_bp) @@ -17,6 +22,7 @@ def init_app(app): app.register_blueprint(domain_bp) app.register_blueprint(admin_bp) app.register_blueprint(api_bp) + app.register_blueprint(apilist_bp) app.register_error_handler(400, handle_bad_request) app.register_error_handler(401, handle_unauthorized_access) diff --git a/powerdnsadmin/routes/admin.py b/powerdnsadmin/routes/admin.py index 7919c8c6d..4887a87b0 100644 --- a/powerdnsadmin/routes/admin.py +++ b/powerdnsadmin/routes/admin.py @@ -4,7 +4,8 @@ import re from base64 import b64encode from ast import literal_eval -from flask import Blueprint, render_template, 
render_template_string, make_response, url_for, current_app, request, redirect, jsonify, abort, flash, session +from flask import Blueprint, render_template, render_template_string, make_response, url_for, current_app, request, \ + redirect, jsonify, abort, flash, session from flask_login import login_required, current_user from ..decorators import operator_role_required, admin_role_required, history_access_required @@ -23,6 +24,7 @@ from ..models.api_key import ApiKey from ..models.base import db +from ..lib.errors import ApiKeyCreateFail from ..lib.schema import ApiPlainKeySchema apikey_plain_schema = ApiPlainKeySchema(many=True) @@ -32,164 +34,180 @@ template_folder='templates', url_prefix='/admin') -""" -changeSet is a list of tuples, in the following format -(old_state, new_state, change_type) - -old_state: dictionary with "disabled" and "content" keys. {"disabled" : False, "content" : "1.1.1.1" } -new_state: similarly -change_type: "addition" or "deletion" or "status" for status change or "unchanged" for no change - -Note: A change in "content", is considered a deletion and recreation of the same record, -holding the new content value. 
-""" -def get_record_changes(del_rrest, add_rrest): - changeSet = [] - delSet = del_rrest['records'] if 'records' in del_rrest else [] - addSet = add_rrest['records'] if 'records' in add_rrest else [] - for d in delSet: # get the deletions and status changes - exists = False - for a in addSet: - if d['content'] == a['content']: - exists = True - if d['disabled'] != a['disabled']: - changeSet.append( ({"disabled":d['disabled'],"content":d['content']}, - {"disabled":a['disabled'],"content":a['content']}, - "status") ) - break - if not exists: # deletion - changeSet.append( ({"disabled":d['disabled'],"content":d['content']}, - None, - "deletion") ) - - for a in addSet: # get the additions - exists = False - for d in delSet: - if d['content'] == a['content']: - exists = True - # already checked for status change - break - if not exists: - changeSet.append( (None, {"disabled":a['disabled'], "content":a['content']}, "addition") ) - continue +def get_record_changes(del_rrset, add_rrset): + """Use the given deleted and added RRset to build a list of record changes. + + Args: + del_rrset: The RRset with changetype DELETE, or None + add_rrset: The RRset with changetype REPLACE, or None + + Returns: + A list of tuples in the format `(old_state, new_state, change_type)`. `old_state` and + `new_state` are dictionaries with the keys "disabled", "content" and "comment". + `change_type` can be "addition", "deletion", "edit" or "unchanged". When it's "addition" + then `old_state` is None, when it's "deletion" then `new_state` is None. 
+ """ + + def get_records(rrset): + """For the given RRset return a combined list of records and comments.""" + if not rrset or 'records' not in rrset: + return [] + records = [dict(record) for record in rrset['records']] + for i, record in enumerate(records): + if 'comments' in rrset and len(rrset['comments']) > i: + record['comment'] = rrset['comments'][i].get('content', None) + else: + record['comment'] = None + return records + + def record_is_unchanged(old, new): + """Returns True if the old record is not different from the new one.""" + if old['content'] != new['content']: + raise ValueError("Can't compare records with different content") + # check everything except the content + return old['disabled'] == new['disabled'] and old['comment'] == new['comment'] + + def to_state(record): + """For the given record, return the state dict.""" + return { + "disabled": record['disabled'], + "content": record['content'], + "comment": record.get('comment', ''), + } - for a in addSet: # get the unchanged - exists = False - for c in changeSet: - if c[1] != None and c[1]["content"] == a['content']: - exists = True + add_records = get_records(add_rrset) + del_records = get_records(del_rrset) + changeset = [] + + for add_record in add_records: + for del_record in list(del_records): + if add_record['content'] == del_record['content']: + # either edited or unchanged + if record_is_unchanged(del_record, add_record): + # unchanged + changeset.append((to_state(del_record), to_state(add_record), "unchanged")) + else: + # edited + changeset.append((to_state(del_record), to_state(add_record), "edit")) + del_records.remove(del_record) break - if not exists: - changeSet.append( ( {"disabled":a['disabled'], "content":a['content']}, {"disabled":a['disabled'], "content":a['content']}, "unchanged") ) + else: # not mis-indented, else block for the del_records for loop + # addition + changeset.append((None, to_state(add_record), "addition")) - return changeSet + # Because the first loop 
removed edit/unchanged records from the del_records list, + # it now only contains real deletions. + for del_record in del_records: + changeset.append((to_state(del_record), None, "deletion")) -# out_changes is a list of HistoryRecordEntry objects in which we will append the new changes -# a HistoryRecordEntry represents a pair of add_rrest and del_rrest -def extract_changelogs_from_a_history_entry(out_changes, history_entry, change_num, record_name=None, record_type=None): + # Sort them by the old content. For Additions the new state will be used. + changeset.sort(key=lambda change: change[0]['content'] if change[0] else change[1]['content']) - if history_entry.detail is None: - return + return changeset - if "add_rrests" in history_entry.detail: - detail_dict = json.loads(history_entry.detail) - else: # not a record entry - return - add_rrests = detail_dict['add_rrests'] - del_rrests = detail_dict['del_rrests'] +def filter_rr_list_by_name_and_type(rrset, record_name, record_type): + return list(filter(lambda rr: rr['name'] == record_name and rr['type'] == record_type, rrset)) - for add_rrest in add_rrests: - exists = False - for del_rrest in del_rrests: - if del_rrest['name'] == add_rrest['name'] and del_rrest['type'] == add_rrest['type']: - exists = True - if change_num not in out_changes: - out_changes[change_num] = [] - out_changes[change_num].append(HistoryRecordEntry(history_entry, del_rrest, add_rrest, "*")) - break - if not exists: # this is a new record - if change_num not in out_changes: - out_changes[change_num] = [] - out_changes[change_num].append(HistoryRecordEntry(history_entry, [], add_rrest, "+")) # (add_rrest, del_rrest, change_type) - for del_rrest in del_rrests: - exists = False - for add_rrest in add_rrests: - if del_rrest['name'] == add_rrest['name'] and del_rrest['type'] == add_rrest['type']: - exists = True # no need to add in the out_changes set - break - if not exists: # this is a deletion - if change_num not in out_changes: - 
out_changes[change_num] = [] - out_changes[change_num].append(HistoryRecordEntry(history_entry, del_rrest, [], "-")) +# out_changes is a list of HistoryRecordEntry objects in which we will append the new changes +# a HistoryRecordEntry represents a pair of add_rrset and del_rrset +def extract_changelogs_from_history(histories, record_name=None, record_type=None): + out_changes = [] + for entry in histories: + changes = [] - # only used for changelog per record - if record_name != None and record_type != None: # then get only the records with the specific (record_name, record_type) tuple - if change_num in out_changes: - changes_i = out_changes[change_num] - else: - return - for hre in changes_i: # for each history record entry in changes_i - if 'type' in hre.add_rrest and hre.add_rrest['name'] == record_name and hre.add_rrest['type'] == record_type: + if entry.detail is None: + continue + + if "add_rrsets" in entry.detail: + details = json.loads(entry.detail) + if not details['add_rrsets'] and not details['del_rrsets']: continue - elif 'type' in hre.del_rrest and hre.del_rrest['name'] == record_name and hre.del_rrest['type'] == record_type: + else: # not a record entry + continue + + # filter only the records with the specific record_name, record_type + if record_name != None and record_type != None: + details['add_rrsets'] = list( + filter_rr_list_by_name_and_type(details['add_rrsets'], record_name, record_type)) + details['del_rrsets'] = list( + filter_rr_list_by_name_and_type(details['del_rrsets'], record_name, record_type)) + + if not details['add_rrsets'] and not details['del_rrsets']: continue - else: - out_changes[change_num].remove(hre) + # same record name and type RR are being deleted and created in same entry. 
+ del_add_changes = set([(r['name'], r['type']) for r in details['add_rrsets']]).intersection( + [(r['name'], r['type']) for r in details['del_rrsets']]) + for del_add_change in del_add_changes: + changes.append(HistoryRecordEntry( + entry, + filter_rr_list_by_name_and_type(details['del_rrsets'], del_add_change[0], del_add_change[1]).pop(0), + filter_rr_list_by_name_and_type(details['add_rrsets'], del_add_change[0], del_add_change[1]).pop(0), + "*") + ) + + for rrset in details['add_rrsets']: + if (rrset['name'], rrset['type']) not in del_add_changes: + changes.append(HistoryRecordEntry(entry, {}, rrset, "+")) + + for rrset in details['del_rrsets']: + if (rrset['name'], rrset['type']) not in del_add_changes: + changes.append(HistoryRecordEntry(entry, rrset, {}, "-")) + + # sort changes by the record name + if changes: + changes.sort(key=lambda change: + change.del_rrset['name'] if change.del_rrset else change.add_rrset['name'] + ) + out_changes.extend(changes) + return out_changes # records with same (name,type) are considered as a single HistoryRecordEntry # history_entry is of type History - used to extract created_by and created_on -# add_rrest is a dictionary of replace -# del_rrest is a dictionary of remove +# add_rrset is a dictionary of replace +# del_rrset is a dictionary of remove class HistoryRecordEntry: - def __init__(self, history_entry, del_rrest, add_rrest, change_type): - # search the add_rrest index into the add_rrest set for the key (name, type) + def __init__(self, history_entry, del_rrset, add_rrset, change_type): + # search the add_rrset index into the add_rrset set for the key (name, type) self.history_entry = history_entry - self.add_rrest = add_rrest - self.del_rrest = del_rrest + self.add_rrset = add_rrset + self.del_rrset = del_rrset self.change_type = change_type # "*": edit or unchanged, "+" new tuple(name,type), "-" deleted (name,type) tuple - self.changed_fields = [] # contains a subset of : [ttl, name, type] - self.changeSet = [] # 
all changes for the records of this add_rrest-del_rrest pair - + self.changed_fields = [] # contains a subset of : [ttl, name, type] + self.changeSet = [] # all changes for the records of this add_rrset-del_rrset pair - if change_type == "+": # addition + if change_type == "+" or change_type == "-": self.changed_fields.append("name") self.changed_fields.append("type") self.changed_fields.append("ttl") - self.changeSet = get_record_changes(del_rrest, add_rrest) - elif change_type == "-": # removal - self.changed_fields.append("name") - self.changed_fields.append("type") - self.changed_fields.append("ttl") - self.changeSet = get_record_changes(del_rrest, add_rrest) elif change_type == "*": # edit of unchanged - if add_rrest['ttl'] != del_rrest['ttl']: + if add_rrset['ttl'] != del_rrset['ttl']: self.changed_fields.append("ttl") - self.changeSet = get_record_changes(del_rrest, add_rrest) - + self.changeSet = get_record_changes(del_rrset, add_rrset) def toDict(self): return { - "add_rrest" : self.add_rrest, - "del_rrest" : self.del_rrest, - "changed_fields" : self.changed_fields, - "created_on" : self.history_entry.created_on, - "created_by" : self.history_entry.created_by, - "change_type" : self.change_type, - "changeSet" : self.changeSet + "add_rrset": self.add_rrset, + "del_rrset": self.del_rrset, + "changed_fields": self.changed_fields, + "created_on": self.history_entry.created_on, + "created_by": self.history_entry.created_by, + "change_type": self.change_type, + "changeSet": self.changeSet } - def __eq__(self, obj2): # used for removal of objects from a list + def __eq__(self, obj2): # used for removal of objects from a list return True if obj2.toDict() == self.toDict() else False + @admin_bp.before_request def before_request(): # Manage session timeout @@ -197,15 +215,14 @@ def before_request(): # current_app.permanent_session_lifetime = datetime.timedelta( # minutes=int(Setting().get('session_timeout'))) current_app.permanent_session_lifetime = 
datetime.timedelta( - minutes=int(Setting().get('session_timeout'))) + minutes=int(Setting().get('session_timeout'))) session.modified = True - -@admin_bp.route('/pdns', methods=['GET']) +@admin_bp.route('/server/statistics', methods=['GET']) @login_required @operator_role_required -def pdns_stats(): +def server_statistics(): if not Setting().get('pdns_api_url') or not Setting().get( 'pdns_api_key') or not Setting().get('pdns_version'): return redirect(url_for('admin.setting_pdns')) @@ -214,7 +231,6 @@ def pdns_stats(): users = User.query.all() server = Server(server_id='localhost') - configs = server.get_config() statistics = server.get_statistic() history_number = History.query.count() @@ -225,15 +241,36 @@ def pdns_stats(): else: uptime = 0 - return render_template('admin_pdns_stats.html', + return render_template('admin_server_statistics.html', domains=domains, users=users, - configs=configs, statistics=statistics, uptime=uptime, history_number=history_number) +@admin_bp.route('/server/configuration', methods=['GET']) +@login_required +@operator_role_required +def server_configuration(): + if not Setting().get('pdns_api_url') or not Setting().get( + 'pdns_api_key') or not Setting().get('pdns_version'): + return redirect(url_for('admin.setting_pdns')) + + domains = Domain.query.all() + users = User.query.all() + + server = Server(server_id='localhost') + configs = server.get_config() + history_number = History.query.count() + + return render_template('admin_server_configuration.html', + domains=domains, + users=users, + configs=configs, + history_number=history_number) + + @admin_bp.route('/user/edit/', methods=['GET', 'POST']) @admin_bp.route('/user/edit', methods=['GET', 'POST']) @login_required @@ -295,6 +332,7 @@ def edit_user(user_username=None): create=create, error=result['msg']) + @admin_bp.route('/key/edit/', methods=['GET', 'POST']) @admin_bp.route('/key/edit', methods=['GET', 'POST']) @login_required @@ -349,26 +387,26 @@ def edit_key(key_id=None): 
plain_key = apikey_plain_schema.dump([apikey])[0]["plain_key"] plain_key = b64encode(plain_key.encode('utf-8')).decode('utf-8') - history_message = "Created API key {0}".format(apikey.id) + history_message = "Created API key {0}".format(apikey.id) # Update existing apikey else: try: if role != "User": domain_list, account_list = [], [] - apikey.update(role,description,domain_list, account_list) - history_message = "Updated API key {0}".format(apikey.id) + apikey.update(role, description, domain_list, account_list) + history_message = "Updated API key {0}".format(apikey.id) except Exception as e: current_app.logger.error('Error: {0}'.format(e)) history = History(msg=history_message, - detail = json.dumps({ - 'key': apikey.id, - 'role': apikey.role.name, - 'description': apikey.description, - 'domains': [domain.name for domain in apikey.domains], - 'accounts': [a.name for a in apikey.accounts] - }), + detail=json.dumps({ + 'key': apikey.id, + 'role': apikey.role.name, + 'description': apikey.description, + 'domains': [domain.name for domain in apikey.domains], + 'accounts': [a.name for a in apikey.accounts] + }), created_by=current_user.username) history.add() @@ -380,6 +418,7 @@ def edit_key(key_id=None): create=create, plain_key=plain_key) + @admin_bp.route('/manage-keys', methods=['GET', 'POST']) @login_required @operator_role_required @@ -392,7 +431,7 @@ def manage_keys(): abort(500) return render_template('admin_manage_keys.html', - keys=apikeys) + keys=apikeys) elif request.method == 'POST': jdata = request.json @@ -403,7 +442,7 @@ def manage_keys(): history_apikey_id = apikey.id history_apikey_role = apikey.role.name history_apikey_description = apikey.description - history_apikey_domains = [ domain.name for domain in apikey.domains] + history_apikey_domains = [domain.name for domain in apikey.domains] apikey.delete() except Exception as e: @@ -411,20 +450,21 @@ def manage_keys(): current_app.logger.info('Delete API key {0}'.format(apikey.id)) history = 
History(msg='Delete API key {0}'.format(apikey.id), - detail = json.dumps({ - 'key': history_apikey_id, - 'role': history_apikey_role, - 'description': history_apikey_description, - 'domains': history_apikey_domains - }), + detail=json.dumps({ + 'key': history_apikey_id, + 'role': history_apikey_role, + 'description': history_apikey_description, + 'domains': history_apikey_domains + }), created_by=current_user.username) history.add() return make_response( - jsonify({ - 'status': 'ok', - 'msg': 'Key has been removed.' - }), 200) + jsonify({ + 'status': 'ok', + 'msg': 'Key has been removed.' + }), 200) + @admin_bp.route('/manage-user', methods=['GET', 'POST']) @login_required @@ -458,17 +498,17 @@ def manage_user(): return make_response( jsonify({ 'status': - 'ok', + 'ok', 'msg': - 'Two factor authentication has been disabled for user.' + 'Two factor authentication has been disabled for user.' }), 200) else: return make_response( jsonify({ 'status': - 'error', + 'error', 'msg': - 'Cannot disable two factor authentication for user.' + 'Cannot disable two factor authentication for user.' }), 500) elif jdata['action'] == 'delete_user': @@ -548,18 +588,18 @@ def manage_user(): return make_response( jsonify({ 'status': - 'error', + 'error', 'msg': - 'You do not have permission to change Administrator users role.' + 'You do not have permission to change Administrator users role.' }), 400) if role_name == 'Administrator' and current_user.role.name != 'Administrator': return make_response( jsonify({ 'status': - 'error', + 'error', 'msg': - 'You do not have permission to promote a user to Administrator role.' + 'You do not have permission to promote a user to Administrator role.' }), 400) user = User(username=username) @@ -579,10 +619,10 @@ def manage_user(): return make_response( jsonify({ 'status': - 'error', + 'error', 'msg': - 'Cannot change user role. {0}'.format( - result['msg']) + 'Cannot change user role. 
{0}'.format( + result['msg']) }), 500) else: return make_response( @@ -597,9 +637,9 @@ def manage_user(): return make_response( jsonify({ 'status': - 'error', + 'error', 'msg': - 'There is something wrong, please contact Administrator.' + 'There is something wrong, please contact Administrator.' }), 400) @@ -609,14 +649,21 @@ def manage_user(): @operator_role_required def edit_account(account_name=None): users = User.query.all() + account = Account.query.filter( + Account.name == account_name).first() + all_accounts = Account.query.all() + accounts = {acc.id: acc for acc in all_accounts} + domains = Domain.query.all() if request.method == 'GET': - if account_name is None: + if account_name is None or not account: return render_template('admin_edit_account.html', + account=None, account_user_ids=[], users=users, + domains=domains, + accounts=accounts, create=1) - else: account = Account.query.filter( Account.name == account_name).first() @@ -625,11 +672,14 @@ def edit_account(account_name=None): account=account, account_user_ids=account_user_ids, users=users, + domains=domains, + accounts=accounts, create=0) if request.method == 'POST': fdata = request.form new_user_list = request.form.getlist('account_multi_user') + new_domain_list = request.form.getlist('account_domains') # on POST, synthesize account and account_user_ids from form data if not account_name: @@ -653,6 +703,8 @@ def edit_account(account_name=None): account=account, account_user_ids=account_user_ids, users=users, + domains=domains, + accounts=accounts, create=create, invalid_accountname=True) @@ -661,19 +713,33 @@ def edit_account(account_name=None): account=account, account_user_ids=account_user_ids, users=users, + domains=domains, + accounts=accounts, create=create, duplicate_accountname=True) result = account.create_account() - history = History(msg='Create account {0}'.format(account.name), - created_by=current_user.username) - else: result = account.update_account() - history = 
History(msg='Update account {0}'.format(account.name), - created_by=current_user.username) if result['status']: + account = Account.query.filter( + Account.name == account_name).first() + old_domains = Domain.query.filter(Domain.account_id == account.id).all() + + for domain_name in new_domain_list: + domain = Domain.query.filter( + Domain.name == domain_name).first() + if account.id != domain.account_id: + Domain(name=domain_name).assoc_account(account.id) + + for domain in old_domains: + if domain.name not in new_domain_list: + Domain(name=domain.name).assoc_account(None) + + history = History(msg='{0} account {1}'.format('Create' if create else 'Update', account.name), + created_by=current_user.username) + account.grant_privileges(new_user_list) history.add() return redirect(url_for('admin.manage_account')) @@ -747,9 +813,9 @@ def manage_account(): return make_response( jsonify({ 'status': - 'error', + 'error', 'msg': - 'There is something wrong, please contact Administrator.' + 'There is something wrong, please contact Administrator.' }), 400) @@ -765,68 +831,73 @@ def __init__(self, history, change_set): detail_dict = json.loads(history.detail) - if 'domain_type' in detail_dict and 'account_id' in detail_dict: # this is a domain creation + if 'domain_type' in detail_dict and 'account_id' in detail_dict: # this is a zone creation self.detailed_msg = render_template_string(""" - +
Domain type:{{ domaintype }}
Zone Type:{{ domaintype }}
Account:{{ account }}
""", - domaintype=detail_dict['domain_type'], - account=Account.get_name_by_id(self=None, account_id=detail_dict['account_id']) if detail_dict['account_id'] != "0" else "None") + domaintype=detail_dict['domain_type'], + account=Account.get_name_by_id(self=None, account_id=detail_dict[ + 'account_id']) if detail_dict[ + 'account_id'] != "0" else "None") - elif 'authenticator' in detail_dict: # this is a user authentication + elif 'authenticator' in detail_dict: # this is a user authentication self.detailed_msg = render_template_string(""" - - +
+ - + + + + + + - - - + - - + +
-

User {{ username }} authentication {{ auth_result }}

-
Username:{{ username }}
Authentication Result:{{ auth_result }}
Authenticator Type:{{ authenticator }}{{ authenticator }}
IP Address{{ ip_address }}IP Address:{{ ip_address }}
""", - background_rgba="68,157,68" if detail_dict['success'] == 1 else "201,48,44", - username=detail_dict['username'], - auth_result="success" if detail_dict['success'] == 1 else "failure", - authenticator=detail_dict['authenticator'], - ip_address=detail_dict['ip_address']) - - elif 'add_rrests' in detail_dict: # this is a domain record change - # changes_set = [] + background_rgba="68,157,68" if detail_dict[ + 'success'] == 1 else "201,48,44", + username=detail_dict['username'], + auth_result="success" if detail_dict[ + 'success'] == 1 else "failure", + authenticator=detail_dict['authenticator'], + ip_address=detail_dict['ip_address']) + + elif 'add_rrsets' in detail_dict: # this is a zone record change self.detailed_msg = "" - # extract_changelogs_from_a_history_entry(changes_set, history, 0) - elif 'name' in detail_dict and 'template' in history.msg: # template creation / deletion + elif 'name' in detail_dict and 'template' in history.msg: # template creation / deletion self.detailed_msg = render_template_string("""
Template name:{{ template_name }}
Description:{{ description }}
""", - template_name=DetailedHistory.get_key_val(detail_dict, "name"), - description=DetailedHistory.get_key_val(detail_dict, "description")) + template_name=DetailedHistory.get_key_val(detail_dict, "name"), + description=DetailedHistory.get_key_val(detail_dict, + "description")) - elif 'Change domain' in history.msg and 'access control' in history.msg: # added or removed a user from a domain + elif any(msg in history.msg for msg in ['Change zone', + 'Change domain']) and 'access control' in history.msg: # added or removed a user from a zone users_with_access = DetailedHistory.get_key_val(detail_dict, "user_has_access") self.detailed_msg = render_template_string(""" - +
Users with access to this domain{{ users_with_access }}
Users with access to this zone{{ users_with_access }}
Number of users:{{ users_with_access | length }}
""", - users_with_access=users_with_access) + users_with_access=users_with_access) elif 'Created API key' in history.msg or 'Updated API key' in history.msg: self.detailed_msg = render_template_string(""" @@ -834,15 +905,18 @@ def __init__(self, history, change_set): Key: {{ keyname }} Role:{{ rolename }} Description:{{ description }} - Accessible domains with this API key:{{ linked_domains }} + Accessible zones with this API key:{{ linked_domains }} Accessible accounts with this API key:{{ linked_accounts }} """, - keyname=DetailedHistory.get_key_val(detail_dict, "key"), - rolename=DetailedHistory.get_key_val(detail_dict, "role"), - description=DetailedHistory.get_key_val(detail_dict, "description"), - linked_domains=DetailedHistory.get_key_val(detail_dict, "domains" if "domains" in detail_dict else "domain_acl"), - linked_accounts=DetailedHistory.get_key_val(detail_dict, "accounts")) + keyname=DetailedHistory.get_key_val(detail_dict, "key"), + rolename=DetailedHistory.get_key_val(detail_dict, "role"), + description=DetailedHistory.get_key_val(detail_dict, + "description"), + linked_domains=DetailedHistory.get_key_val(detail_dict, + "domains" if "domains" in detail_dict else "domain_acl"), + linked_accounts=DetailedHistory.get_key_val(detail_dict, + "accounts")) elif 'Delete API key' in history.msg: self.detailed_msg = render_template_string(""" @@ -850,35 +924,39 @@ def __init__(self, history, change_set): Key: {{ keyname }} Role:{{ rolename }} Description:{{ description }} - Accessible domains with this API key:{{ linked_domains }} + Accessible zones with this API key:{{ linked_domains }} """, - keyname=DetailedHistory.get_key_val(detail_dict, "key"), - rolename=DetailedHistory.get_key_val(detail_dict, "role"), - description=DetailedHistory.get_key_val(detail_dict, "description"), - linked_domains=DetailedHistory.get_key_val(detail_dict, "domains")) - - elif 'Update type for domain' in history.msg: + keyname=DetailedHistory.get_key_val(detail_dict, "key"), + 
rolename=DetailedHistory.get_key_val(detail_dict, "role"), + description=DetailedHistory.get_key_val(detail_dict, + "description"), + linked_domains=DetailedHistory.get_key_val(detail_dict, + "domains")) + + elif any(msg in history.msg for msg in ['Update type for zone', 'Update type for domain']): self.detailed_msg = render_template_string(""" - - + +
Domain: {{ domain }}
Domain type:{{ domain_type }}
Zone: {{ domain }}
Zone type:{{ domain_type }}
Masters:{{ masters }}
""", - domain=DetailedHistory.get_key_val(detail_dict, "domain"), - domain_type=DetailedHistory.get_key_val(detail_dict, "type"), - masters=DetailedHistory.get_key_val(detail_dict, "masters")) + domain=DetailedHistory.get_key_val(detail_dict, "domain"), + domain_type=DetailedHistory.get_key_val(detail_dict, "type"), + masters=DetailedHistory.get_key_val(detail_dict, "masters")) elif 'reverse' in history.msg: self.detailed_msg = render_template_string(""" - - + +
Domain Type: {{ domain_type }}
Domain Master IPs:{{ domain_master_ips }}
Zone Type: {{ domain_type }}
Zone Master IPs:{{ domain_master_ips }}
""", - domain_type=DetailedHistory.get_key_val(detail_dict, "domain_type"), - domain_master_ips=DetailedHistory.get_key_val(detail_dict, "domain_master_ips")) + domain_type=DetailedHistory.get_key_val(detail_dict, + "domain_type"), + domain_master_ips=DetailedHistory.get_key_val(detail_dict, + "domain_master_ips")) elif DetailedHistory.get_key_val(detail_dict, 'msg') and DetailedHistory.get_key_val(detail_dict, 'status'): self.detailed_msg = render_template_string(''' @@ -887,8 +965,22 @@ def __init__(self, history, change_set): Message:{{ history_msg }} ''', - history_status=DetailedHistory.get_key_val(detail_dict, 'status'), - history_msg=DetailedHistory.get_key_val(detail_dict, 'msg')) + history_status=DetailedHistory.get_key_val(detail_dict, + 'status'), + history_msg=DetailedHistory.get_key_val(detail_dict, 'msg')) + + elif any(msg in history.msg for msg in ['Update zone', + 'Update domain']) and 'associate account' in history.msg: # When an account gets associated or dissociate with zones + self.detailed_msg = render_template_string(''' + + + +
Associate: {{ history_assoc_account }}
Dissociate:{{ history_dissoc_account }}
+ ''', + history_assoc_account=DetailedHistory.get_key_val(detail_dict, + 'assoc_account'), + history_dissoc_account=DetailedHistory.get_key_val(detail_dict, + 'dissoc_account')) # check for lower key as well for old databases @staticmethod @@ -898,379 +990,411 @@ def get_key_val(_dict, key): # convert a list of History objects into DetailedHistory objects def convert_histories(histories): - changes_set = dict() - detailedHistories = [] - j = 0 - for i in range(len(histories)): - if histories[i].detail and ('add_rrests' in histories[i].detail or 'del_rrests' in histories[i].detail): - extract_changelogs_from_a_history_entry(changes_set, histories[i], j) - if j in changes_set: - detailedHistories.append(DetailedHistory(histories[i], changes_set[j])) - else: # no changes were found - detailedHistories.append(DetailedHistory(histories[i], None)) - j += 1 - - else: - detailedHistories.append(DetailedHistory(histories[i], None)) - return detailedHistories + detailedHistories = [] + for history in histories: + if history.detail and ('add_rrsets' in history.detail or 'del_rrsets' in history.detail): + detailedHistories.append(DetailedHistory(history, extract_changelogs_from_history([history]))) + else: + detailedHistories.append(DetailedHistory(history, None)) + return detailedHistories + @admin_bp.route('/history', methods=['GET', 'POST']) @login_required @history_access_required def history(): - if request.method == 'POST': - if current_user.role.name != 'Administrator': - return make_response( - jsonify({ - 'status': 'error', - 'msg': 'You do not have permission to remove history.' - }), 401) - - h = History() - result = h.remove_all() - if result: - history = History(msg='Remove all histories', - created_by=current_user.username) - history.add() - return make_response( - jsonify({ - 'status': 'ok', - 'msg': 'Changed user role successfully.' - }), 200) - else: - return make_response( - jsonify({ - 'status': 'error', - 'msg': 'Can not remove histories.' 
- }), 500) - - - if request.method == 'GET': - doms = accounts = users = "" - if current_user.role.name in [ 'Administrator', 'Operator']: - all_domain_names = Domain.query.all() - all_account_names = Account.query.all() - all_user_names = User.query.all() - - - - for d in all_domain_names: - doms += d.name + " " - for acc in all_account_names: - accounts += acc.name + " " - for usr in all_user_names: - users += usr.username + " " - else: # special autocomplete for users - all_domain_names = db.session.query(Domain) \ - .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ - .outerjoin(Account, Domain.account_id == Account.id) \ - .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ - .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )).all() - - all_account_names = db.session.query(Account) \ - .outerjoin(Domain, Domain.account_id == Account.id) \ - .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ - .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ - .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )).all() - - - all_user_names = [] - for a in all_account_names: - temp = db.session.query(User) \ - .join(AccountUser, AccountUser.user_id == User.id) \ - .outerjoin(Account, Account.id == AccountUser.account_id) \ - .filter( - db.or_( - Account.id == a.id, - AccountUser.account_id == a.id - ) - ) \ - .all() - for u in temp: - if u in all_user_names: - continue - all_user_names.append(u) - - for d in all_domain_names: - doms += d.name + " " - - for a in all_account_names: - accounts += a.name + " " - for u in all_user_names: - users += u.username + " " - return render_template('admin_history.html', all_domain_names=doms, all_account_names=accounts, all_usernames=users) + if request.method == 'POST': + if current_user.role.name != 'Administrator': + return make_response( + jsonify({ + 'status': 'error', + 'msg': 'You do not 
have permission to remove history.' + }), 401) + + if Setting().get('preserve_history'): + return make_response( + jsonify({ + 'status': 'error', + 'msg': 'History removal is not allowed (toggle preserve_history in settings).' + }), 401) + + h = History() + result = h.remove_all() + if result: + history = History(msg='Remove all histories', + created_by=current_user.username) + history.add() + return make_response( + jsonify({ + 'status': 'ok', + 'msg': 'Changed user role successfully.' + }), 200) + else: + return make_response( + jsonify({ + 'status': 'error', + 'msg': 'Can not remove histories.' + }), 500) + + if request.method == 'GET': + doms = accounts = users = "" + if current_user.role.name in ['Administrator', 'Operator']: + all_domain_names = Domain.query.all() + all_account_names = Account.query.all() + all_user_names = User.query.all() + + for d in all_domain_names: + doms += d.name + " " + for acc in all_account_names: + accounts += acc.name + " " + for usr in all_user_names: + users += usr.username + " " + else: # special autocomplete for users + all_domain_names = db.session.query(Domain) \ + .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ + .outerjoin(Account, Domain.account_id == Account.id) \ + .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ + .filter( + db.or_( + DomainUser.user_id == current_user.id, + AccountUser.user_id == current_user.id + )).all() + + all_account_names = db.session.query(Account) \ + .outerjoin(Domain, Domain.account_id == Account.id) \ + .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ + .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ + .filter( + db.or_( + DomainUser.user_id == current_user.id, + AccountUser.user_id == current_user.id + )).all() + + all_user_names = [] + for a in all_account_names: + temp = db.session.query(User) \ + .join(AccountUser, AccountUser.user_id == User.id) \ + .outerjoin(Account, Account.id == AccountUser.account_id) \ + .filter( + db.or_( + 
Account.id == a.id, + AccountUser.account_id == a.id + ) + ) \ + .all() + for u in temp: + if u in all_user_names: + continue + all_user_names.append(u) + + for d in all_domain_names: + doms += d.name + " " + + for a in all_account_names: + accounts += a.name + " " + for u in all_user_names: + users += u.username + " " + return render_template('admin_history.html', all_domain_names=doms, all_account_names=accounts, + all_usernames=users) + # local_offset is the offset of the utc to the local time # offset must be int # return the date converted and simplified def from_utc_to_local(local_offset, timeframe): - offset = str(local_offset *(-1)) - date_split = str(timeframe).split(".")[0] - date_converted = datetime.datetime.strptime(date_split, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(minutes=int(offset)) - return date_converted + offset = str(local_offset * (-1)) + date_split = str(timeframe).split(".")[0] + date_converted = datetime.datetime.strptime(date_split, '%Y-%m-%d %H:%M:%S') + datetime.timedelta( + minutes=int(offset)) + return date_converted + @admin_bp.route('/history_table', methods=['GET', 'POST']) @login_required @history_access_required -def history_table(): # ajax call data - - if request.method == 'POST': - if current_user.role.name != 'Administrator': - return make_response( - jsonify({ - 'status': 'error', - 'msg': 'You do not have permission to remove history.' - }), 401) - - h = History() - result = h.remove_all() - if result: - history = History(msg='Remove all histories', - created_by=current_user.username) - history.add() - return make_response( - jsonify({ - 'status': 'ok', - 'msg': 'Changed user role successfully.' - }), 200) - else: - return make_response( - jsonify({ - 'status': 'error', - 'msg': 'Can not remove histories.' 
- }), 500) - - detailedHistories = [] - lim = int(Setting().get('max_history_records')) # max num of records - - if request.method == 'GET': - if current_user.role.name in [ 'Administrator', 'Operator' ]: - base_query = History.query - else: - # if the user isn't an administrator or operator, - # allow_user_view_history must be enabled to get here, - # so include history for the domains for the user - base_query = db.session.query(History) \ - .join(Domain, History.domain_id == Domain.id) \ - .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ - .outerjoin(Account, Domain.account_id == Account.id) \ - .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ - .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )) - - domain_name = request.args.get('domain_name_filter') if request.args.get('domain_name_filter') != None \ - and len(request.args.get('domain_name_filter')) != 0 else None - account_name = request.args.get('account_name_filter') if request.args.get('account_name_filter') != None \ - and len(request.args.get('account_name_filter')) != 0 else None - user_name = request.args.get('auth_name_filter') if request.args.get('auth_name_filter') != None \ - and len(request.args.get('auth_name_filter')) != 0 else None - - min_date = request.args.get('min') if request.args.get('min') != None and len( request.args.get('min')) != 0 else None - if min_date != None: # get 1 day earlier, to check for timezone errors - min_date = str(datetime.datetime.strptime(min_date, '%Y-%m-%d') - datetime.timedelta(days=1)) - max_date = request.args.get('max') if request.args.get('max') != None and len( request.args.get('max')) != 0 else None - if max_date != None: # get 1 day later, to check for timezone errors - max_date = str(datetime.datetime.strptime(max_date, '%Y-%m-%d') + datetime.timedelta(days=1)) - tzoffset = request.args.get('tzoffset') if request.args.get('tzoffset') != None and 
len(request.args.get('tzoffset')) != 0 else None - changed_by = request.args.get('user_name_filter') if request.args.get('user_name_filter') != None \ - and len(request.args.get('user_name_filter')) != 0 else None - """ - Auth methods: LOCAL, Github OAuth, Azure OAuth, SAML, OIDC OAuth, Google OAuth - """ - auth_methods = [] - if (request.args.get('auth_local_only_checkbox') is None \ - and request.args.get('auth_oauth_only_checkbox') is None \ - and request.args.get('auth_saml_only_checkbox') is None and request.args.get('auth_all_checkbox') is None): - auth_methods = [] - if request.args.get('auth_all_checkbox') == "on": - auth_methods.append("") - if request.args.get('auth_local_only_checkbox') == "on": - auth_methods.append("LOCAL") - if request.args.get('auth_oauth_only_checkbox') == "on": - auth_methods.append("OAuth") - if request.args.get('auth_saml_only_checkbox') == "on": - auth_methods.append("SAML") - - if request.args.get('domain_changelog_only_checkbox') != None: - changelog_only = True if request.args.get('domain_changelog_only_checkbox') == "on" else False - else: - changelog_only = False - - - - - # users cannot search for authentication - if user_name != None and current_user.role.name not in [ 'Administrator', 'Operator']: - histories = [] - elif domain_name != None: - - if not changelog_only: - histories = base_query \ - .filter( - db.and_( - db.or_( - History.msg.like("%domain "+ domain_name) if domain_name != "*" else History.msg.like("%domain%"), - History.msg.like("%domain "+ domain_name + " access control") if domain_name != "*" else History.msg.like("%domain%access control") - ), - History.created_on <= max_date if max_date != None else True, - History.created_on >= min_date if min_date != None else True, - History.created_by == changed_by if changed_by != None else True - ) - ).order_by(History.created_on.desc()).limit(lim).all() - else: - # search for records changes only - histories = base_query \ - .filter( - db.and_( - 
History.msg.like("Apply record changes to domain " + domain_name) if domain_name != "*" \ - else History.msg.like("Apply record changes to domain%"), - History.created_on <= max_date if max_date != None else True, - History.created_on >= min_date if min_date != None else True, - History.created_by == changed_by if changed_by != None else True - - ) - ).order_by(History.created_on.desc()) \ - .limit(lim).all() - elif account_name != None: - if current_user.role.name in ['Administrator', 'Operator']: - histories = base_query \ - .join(Domain, History.domain_id == Domain.id) \ - .outerjoin(Account, Domain.account_id == Account.id) \ - .filter( - db.and_( - Account.id == Domain.account_id, - account_name == Account.name if account_name != "*" else True, - History.created_on <= max_date if max_date != None else True, - History.created_on >= min_date if min_date != None else True, - History.created_by == changed_by if changed_by != None else True - ) - ).order_by(History.created_on.desc()) \ - .limit(lim).all() - else: - histories = base_query \ - .filter( - db.and_( - Account.id == Domain.account_id, - account_name == Account.name if account_name != "*" else True, - History.created_on <= max_date if max_date != None else True, - History.created_on >= min_date if min_date != None else True, - History.created_by == changed_by if changed_by != None else True - ) - ).order_by(History.created_on.desc()) \ - .limit(lim).all() - elif user_name != None and current_user.role.name in [ 'Administrator', 'Operator']: # only admins can see the user login-logouts - - histories = History.query \ - .filter( - db.and_( - db.or_( - History.msg.like("User "+ user_name + " authentication%") if user_name != "*" and user_name != None else History.msg.like("%authentication%"), - History.msg.like("User "+ user_name + " was not authorized%") if user_name != "*" and user_name != None else History.msg.like("User%was not authorized%") - ), - History.created_on <= max_date if max_date != None else 
True, - History.created_on >= min_date if min_date != None else True, - History.created_by == changed_by if changed_by != None else True - ) - ) \ - .order_by(History.created_on.desc()).limit(lim).all() - temp = [] - for h in histories: - for method in auth_methods: - if method in h.detail: - temp.append(h) - break - histories = temp - elif (changed_by != None or max_date != None) and current_user.role.name in [ 'Administrator', 'Operator'] : # select changed by and date filters only - histories = History.query \ - .filter( - db.and_( - History.created_on <= max_date if max_date != None else True, - History.created_on >= min_date if min_date != None else True, - History.created_by == changed_by if changed_by != None else True - ) - ) \ - .order_by(History.created_on.desc()).limit(lim).all() - elif (changed_by != None or max_date != None): # special filtering for user because one user does not have access to log-ins logs - histories = base_query \ - .filter( - db.and_( - History.created_on <= max_date if max_date != None else True, - History.created_on >= min_date if min_date != None else True, - History.created_by == changed_by if changed_by != None else True - ) - ) \ - .order_by(History.created_on.desc()).limit(lim).all() - elif max_date != None: # if changed by == null and only date is applied - histories = base_query.filter( - db.and_( - History.created_on <= max_date if max_date != None else True, - History.created_on >= min_date if min_date != None else True, - ) - ).order_by(History.created_on.desc()).limit(lim).all() - else: # default view - if current_user.role.name in [ 'Administrator', 'Operator']: - histories = History.query.order_by(History.created_on.desc()).limit(lim).all() - else: - histories = db.session.query(History) \ - .join(Domain, History.domain_id == Domain.id) \ - .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ - .outerjoin(Account, Domain.account_id == Account.id) \ - .outerjoin(AccountUser, Account.id == 
AccountUser.account_id) \ - .order_by(History.created_on.desc()) \ - .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )).limit(lim).all() - - detailedHistories = convert_histories(histories) - - # Remove dates from previous or next day that were brought over - if tzoffset != None: - if min_date != None: - min_date_split = min_date.split()[0] - if max_date != None: - max_date_split = max_date.split()[0] - for i, history_rec in enumerate(detailedHistories): - local_date = str(from_utc_to_local(int(tzoffset), history_rec.history.created_on).date()) - if (min_date != None and local_date == min_date_split) or (max_date != None and local_date == max_date_split): - detailedHistories[i] = None - - # Remove elements previously flagged as None - detailedHistories = [h for h in detailedHistories if h is not None] - - return render_template('admin_history_table.html', histories=detailedHistories, len_histories=len(detailedHistories), lim=lim) +def history_table(): # ajax call data + if request.method == 'POST': + if current_user.role.name != 'Administrator': + return make_response( + jsonify({ + 'status': 'error', + 'msg': 'You do not have permission to remove history.' + }), 401) + + h = History() + result = h.remove_all() + if result: + history = History(msg='Remove all histories', + created_by=current_user.username) + history.add() + return make_response( + jsonify({ + 'status': 'ok', + 'msg': 'Changed user role successfully.' + }), 200) + else: + return make_response( + jsonify({ + 'status': 'error', + 'msg': 'Can not remove histories.' 
+ }), 500) + + detailedHistories = [] + lim = int(Setting().get('max_history_records')) # max num of records + + if request.method == 'GET': + base_query = History.query \ + .with_hint(History, "FORCE INDEX (ix_history_created_on)", 'mysql') + if current_user.role.name not in ['Administrator', 'Operator']: + # if the user isn't an administrator or operator, + # allow_user_view_history must be enabled to get here, + # so include history for the zones for the user + allowed_domain_id_subquery = db.session.query(Domain.id) \ + .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ + .outerjoin(Account, Domain.account_id == Account.id) \ + .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ + .filter(db.or_( + DomainUser.user_id == current_user.id, + AccountUser.user_id == current_user.id + )) \ + .subquery() + base_query = base_query.filter(History.domain_id.in_(allowed_domain_id_subquery)) + + domain_name = request.args.get('domain_name_filter') if request.args.get('domain_name_filter') != None \ + and len( + request.args.get('domain_name_filter')) != 0 else None + account_name = request.args.get('account_name_filter') if request.args.get('account_name_filter') != None \ + and len( + request.args.get('account_name_filter')) != 0 else None + user_name = request.args.get('auth_name_filter') if request.args.get('auth_name_filter') != None \ + and len(request.args.get('auth_name_filter')) != 0 else None + + min_date = request.args.get('min') if request.args.get('min') != None and len( + request.args.get('min')) != 0 else None + if min_date != None: # get 1 day earlier, to check for timezone errors + min_date = str(datetime.datetime.strptime(min_date, '%Y-%m-%d') - datetime.timedelta(days=1)) + max_date = request.args.get('max') if request.args.get('max') != None and len( + request.args.get('max')) != 0 else None + if max_date != None: # get 1 day later, to check for timezone errors + max_date = str(datetime.datetime.strptime(max_date, '%Y-%m-%d') + 
datetime.timedelta(days=1)) + tzoffset = request.args.get('tzoffset') if request.args.get('tzoffset') != None and len( + request.args.get('tzoffset')) != 0 else None + changed_by = request.args.get('user_name_filter') if request.args.get('user_name_filter') != None \ + and len( + request.args.get('user_name_filter')) != 0 else None + """ + Auth methods: LOCAL, Github OAuth, Azure OAuth, SAML, OIDC OAuth, Google OAuth + """ + auth_methods = [] + if (request.args.get('auth_local_only_checkbox') is None \ + and request.args.get('auth_oauth_only_checkbox') is None \ + and request.args.get('auth_saml_only_checkbox') is None and request.args.get( + 'auth_all_checkbox') is None): + auth_methods = [] + if request.args.get('auth_all_checkbox') == "on": + auth_methods.append("") + if request.args.get('auth_local_only_checkbox') == "on": + auth_methods.append("LOCAL") + if request.args.get('auth_oauth_only_checkbox') == "on": + auth_methods.append("OAuth") + if request.args.get('auth_saml_only_checkbox') == "on": + auth_methods.append("SAML") + + if request.args.get('domain_changelog_only_checkbox') != None: + changelog_only = True if request.args.get('domain_changelog_only_checkbox') == "on" else False + else: + changelog_only = False + + # users cannot search for authentication + if user_name != None and current_user.role.name not in ['Administrator', 'Operator']: + histories = [] + elif domain_name != None: + + if not changelog_only: + histories = base_query \ + .filter( + db.and_( + db.or_( + History.msg.like("%domain " + domain_name) if domain_name != "*" else History.msg.like( + "%domain%"), + History.msg.like("%zone " + domain_name) if domain_name != "*" else History.msg.like( + "%zone%"), + History.msg.like( + "%domain " + domain_name + " access control") if domain_name != "*" else History.msg.like( + "%domain%access control"), + History.msg.like( + "%zone " + domain_name + " access control") if domain_name != "*" else History.msg.like( + "%zone%access control") + ), 
+ History.created_on <= max_date if max_date != None else True, + History.created_on >= min_date if min_date != None else True, + History.created_by == changed_by if changed_by != None else True + ) + ).order_by(History.created_on.desc()).limit(lim).all() + else: + # search for records changes only + histories = base_query \ + .filter( + db.and_( + db.or_( + History.msg.like("Apply record changes to domain " + domain_name) if domain_name != "*" \ + else History.msg.like("Apply record changes to domain%"), + History.msg.like("Apply record changes to zone " + domain_name) if domain_name != "*" \ + else History.msg.like("Apply record changes to zone%"), + ), + History.created_on <= max_date if max_date != None else True, + History.created_on >= min_date if min_date != None else True, + History.created_by == changed_by if changed_by != None else True + + ) + ).order_by(History.created_on.desc()) \ + .limit(lim).all() + elif account_name != None: + if current_user.role.name in ['Administrator', 'Operator']: + histories = base_query \ + .join(Domain, History.domain_id == Domain.id) \ + .outerjoin(Account, Domain.account_id == Account.id) \ + .filter( + db.and_( + Account.id == Domain.account_id, + account_name == Account.name if account_name != "*" else True, + History.created_on <= max_date if max_date != None else True, + History.created_on >= min_date if min_date != None else True, + History.created_by == changed_by if changed_by != None else True + ) + ).order_by(History.created_on.desc()) \ + .limit(lim).all() + else: + histories = base_query \ + .filter( + db.and_( + Account.id == Domain.account_id, + account_name == Account.name if account_name != "*" else True, + History.created_on <= max_date if max_date != None else True, + History.created_on >= min_date if min_date != None else True, + History.created_by == changed_by if changed_by != None else True + ) + ).order_by(History.created_on.desc()) \ + .limit(lim).all() + elif user_name != None and 
current_user.role.name in ['Administrator', + 'Operator']: # only admins can see the user login-logouts + + histories = base_query.filter( + db.and_( + db.or_( + History.msg.like( + "User " + user_name + " authentication%") if user_name != "*" and user_name != None else History.msg.like( + "%authentication%"), + History.msg.like( + "User " + user_name + " was not authorized%") if user_name != "*" and user_name != None else History.msg.like( + "User%was not authorized%") + ), + History.created_on <= max_date if max_date != None else True, + History.created_on >= min_date if min_date != None else True, + History.created_by == changed_by if changed_by != None else True + ) + ) \ + .order_by(History.created_on.desc()).limit(lim).all() + temp = [] + for h in histories: + for method in auth_methods: + if method in h.detail: + temp.append(h) + break + histories = temp + elif (changed_by != None or max_date != None) and current_user.role.name in ['Administrator', + 'Operator']: # select changed by and date filters only + histories = base_query.filter( + db.and_( + History.created_on <= max_date if max_date != None else True, + History.created_on >= min_date if min_date != None else True, + History.created_by == changed_by if changed_by != None else True + ) + ) \ + .order_by(History.created_on.desc()).limit(lim).all() + elif ( + changed_by != None or max_date != None): # special filtering for user because one user does not have access to log-ins logs + histories = base_query.filter( + db.and_( + History.created_on <= max_date if max_date != None else True, + History.created_on >= min_date if min_date != None else True, + History.created_by == changed_by if changed_by != None else True + ) + ) \ + .order_by(History.created_on.desc()).limit(lim).all() + elif max_date != None: # if changed by == null and only date is applied + histories = base_query.filter( + db.and_( + History.created_on <= max_date if max_date != None else True, + History.created_on >= min_date if min_date 
!= None else True, + ) + ).order_by(History.created_on.desc()).limit(lim).all() + else: # default view + histories = base_query.order_by(History.created_on.desc()).limit(lim).all() + + detailedHistories = convert_histories(histories) + + # Remove dates from previous or next day that were brought over + if tzoffset != None: + if min_date != None: + min_date_split = min_date.split()[0] + if max_date != None: + max_date_split = max_date.split()[0] + for i, history_rec in enumerate(detailedHistories): + local_date = str(from_utc_to_local(int(tzoffset), history_rec.history.created_on).date()) + if (min_date != None and local_date == min_date_split) or ( + max_date != None and local_date == max_date_split): + detailedHistories[i] = None + + # Remove elements previously flagged as None + detailedHistories = [h for h in detailedHistories if h is not None] + + return render_template('admin_history_table.html', histories=detailedHistories, + len_histories=len(detailedHistories), lim=lim) @admin_bp.route('/setting/basic', methods=['GET']) @login_required @operator_role_required def setting_basic(): - if request.method == 'GET': - settings = [ - 'maintenance', 'fullscreen_layout', 'record_helper', - 'login_ldap_first', 'default_record_table_size', - 'default_domain_table_size', 'auto_ptr', 'record_quick_edit', - 'pretty_ipv6_ptr', 'dnssec_admins_only', - 'allow_user_create_domain', 'allow_user_remove_domain', 'allow_user_view_history', 'bg_domain_updates', 'site_name', - 'session_timeout', 'warn_session_timeout', 'ttl_options', - 'pdns_api_timeout', 'verify_ssl_connections', 'verify_user_email', - 'delete_sso_accounts', 'otp_field_enabled', 'custom_css', 'enable_api_rr_history', 'max_history_records', 'otp_force' - ] - - return render_template('admin_setting_basic.html', settings=settings) + settings = [ + 'account_name_extra_chars', + 'allow_user_create_domain', + 'allow_user_remove_domain', + 'allow_user_view_history', + 'auto_ptr', + 'bg_domain_updates', + 'custom_css', + 
'default_domain_table_size', + 'default_record_table_size', + 'delete_sso_accounts', + 'custom_history_header', + 'deny_domain_override', + 'dnssec_admins_only', + 'enable_api_rr_history', + 'enforce_api_ttl', + 'fullscreen_layout', + 'gravatar_enabled', + 'login_ldap_first', + 'maintenance', + 'max_history_records', + 'otp_field_enabled', + 'otp_force', + 'pdns_api_timeout', + 'preserve_history', + 'pretty_ipv6_ptr', + 'record_helper', + 'record_quick_edit', + 'session_timeout', + 'site_name', + 'ttl_options', + 'verify_ssl_connections', + 'verify_user_email', + 'warn_session_timeout', + ] + + return render_template('admin_setting_basic.html', settings=settings) @admin_bp.route('/setting/basic//edit', methods=['POST']) @@ -1281,7 +1405,7 @@ def setting_basic_edit(setting): new_value = jdata['value'] result = Setting().set(setting, new_value) - if (result): + if result: return make_response( jsonify({ 'status': 'ok', @@ -1345,294 +1469,54 @@ def setting_pdns(): @login_required @operator_role_required def setting_records(): + from powerdnsadmin.lib.settings import AppSettings if request.method == 'GET': - _fr = Setting().get('forward_records_allow_edit') - _rr = Setting().get('reverse_records_allow_edit') - f_records = literal_eval(_fr) if isinstance(_fr, str) else _fr - r_records = literal_eval(_rr) if isinstance(_rr, str) else _rr - + forward_records = Setting().get('forward_records_allow_edit') + reverse_records = Setting().get('reverse_records_allow_edit') return render_template('admin_setting_records.html', - f_records=f_records, - r_records=r_records) + f_records=forward_records, + r_records=reverse_records) elif request.method == 'POST': fr = {} rr = {} - records = Setting().defaults['forward_records_allow_edit'] + records = AppSettings.defaults['forward_records_allow_edit'] for r in records: fr[r] = True if request.form.get('fr_{0}'.format( r.lower())) else False rr[r] = True if request.form.get('rr_{0}'.format( r.lower())) else False - 
Setting().set('forward_records_allow_edit', str(fr)) - Setting().set('reverse_records_allow_edit', str(rr)) - return redirect(url_for('admin.setting_records')) - + Setting().set('forward_records_allow_edit', json.dumps(fr)) + Setting().set('reverse_records_allow_edit', json.dumps(rr)) -def has_an_auth_method(local_db_enabled=None, - ldap_enabled=None, - google_oauth_enabled=None, - github_oauth_enabled=None, - oidc_oauth_enabled=None, - azure_oauth_enabled=None): - if local_db_enabled is None: - local_db_enabled = Setting().get('local_db_enabled') - if ldap_enabled is None: - ldap_enabled = Setting().get('ldap_enabled') - if google_oauth_enabled is None: - google_oauth_enabled = Setting().get('google_oauth_enabled') - if github_oauth_enabled is None: - github_oauth_enabled = Setting().get('github_oauth_enabled') - if oidc_oauth_enabled is None: - oidc_oauth_enabled = Setting().get('oidc_oauth_enabled') - if azure_oauth_enabled is None: - azure_oauth_enabled = Setting().get('azure_oauth_enabled') - return local_db_enabled or ldap_enabled or google_oauth_enabled or github_oauth_enabled or oidc_oauth_enabled or azure_oauth_enabled + return redirect(url_for('admin.setting_records')) @admin_bp.route('/setting/authentication', methods=['GET', 'POST']) @login_required @admin_role_required def setting_authentication(): - if request.method == 'GET': - return render_template('admin_setting_authentication.html') - elif request.method == 'POST': - conf_type = request.form.get('config_tab') - result = None + return render_template('admin_setting_authentication.html') - if conf_type == 'general': - local_db_enabled = True if request.form.get( - 'local_db_enabled') else False - signup_enabled = True if request.form.get( - 'signup_enabled', ) else False - if not has_an_auth_method(local_db_enabled=local_db_enabled): - result = { - 'status': - False, - 'msg': - 'Must have at least one authentication method enabled.' 
- } - else: - Setting().set('local_db_enabled', local_db_enabled) - Setting().set('signup_enabled', signup_enabled) - result = {'status': True, 'msg': 'Saved successfully'} - elif conf_type == 'ldap': - ldap_enabled = True if request.form.get('ldap_enabled') else False - - if not has_an_auth_method(ldap_enabled=ldap_enabled): - result = { - 'status': - False, - 'msg': - 'Must have at least one authentication method enabled.' - } - else: - Setting().set('ldap_enabled', ldap_enabled) - Setting().set('ldap_type', request.form.get('ldap_type')) - Setting().set('ldap_uri', request.form.get('ldap_uri')) - Setting().set('ldap_base_dn', request.form.get('ldap_base_dn')) - Setting().set('ldap_admin_username', - request.form.get('ldap_admin_username')) - Setting().set('ldap_admin_password', - request.form.get('ldap_admin_password')) - Setting().set('ldap_filter_basic', - request.form.get('ldap_filter_basic')) - Setting().set('ldap_filter_group', - request.form.get('ldap_filter_group')) - Setting().set('ldap_filter_username', - request.form.get('ldap_filter_username')) - Setting().set('ldap_filter_groupname', - request.form.get('ldap_filter_groupname')) - Setting().set( - 'ldap_sg_enabled', True - if request.form.get('ldap_sg_enabled') == 'ON' else False) - Setting().set('ldap_admin_group', - request.form.get('ldap_admin_group')) - Setting().set('ldap_operator_group', - request.form.get('ldap_operator_group')) - Setting().set('ldap_user_group', - request.form.get('ldap_user_group')) - Setting().set('ldap_domain', request.form.get('ldap_domain')) - Setting().set( - 'autoprovisioning', True - if request.form.get('autoprovisioning') == 'ON' else False) - Setting().set('autoprovisioning_attribute', - request.form.get('autoprovisioning_attribute')) - - if request.form.get('autoprovisioning')=='ON': - if validateURN(request.form.get('urn_value')): - Setting().set('urn_value', - request.form.get('urn_value')) - else: - return render_template('admin_setting_authentication.html', - 
error="Invalid urn") - else: - Setting().set('urn_value', - request.form.get('urn_value')) +@admin_bp.route('/setting/authentication/api', methods=['POST']) +@login_required +@admin_role_required +def setting_authentication_api(): + from powerdnsadmin.lib.settings import AppSettings + result = {'status': 1, 'messages': [], 'data': {}} - Setting().set('purge', True - if request.form.get('purge') == 'ON' else False) + if request.form.get('commit') == '1': + model = Setting() + data = json.loads(request.form.get('data')) + for key, value in data.items(): + if key in AppSettings.groups['authentication']: + model.set(key, value) - result = {'status': True, 'msg': 'Saved successfully'} - elif conf_type == 'google': - google_oauth_enabled = True if request.form.get( - 'google_oauth_enabled') else False - if not has_an_auth_method(google_oauth_enabled=google_oauth_enabled): - result = { - 'status': - False, - 'msg': - 'Must have at least one authentication method enabled.' - } - else: - Setting().set('google_oauth_enabled', google_oauth_enabled) - Setting().set('google_oauth_client_id', - request.form.get('google_oauth_client_id')) - Setting().set('google_oauth_client_secret', - request.form.get('google_oauth_client_secret')) - Setting().set('google_token_url', - request.form.get('google_token_url')) - Setting().set('google_oauth_scope', - request.form.get('google_oauth_scope')) - Setting().set('google_authorize_url', - request.form.get('google_authorize_url')) - Setting().set('google_base_url', - request.form.get('google_base_url')) - result = { - 'status': True, - 'msg': - 'Saved successfully. Please reload PDA to take effect.' - } - elif conf_type == 'github': - github_oauth_enabled = True if request.form.get( - 'github_oauth_enabled') else False - if not has_an_auth_method(github_oauth_enabled=github_oauth_enabled): - result = { - 'status': - False, - 'msg': - 'Must have at least one authentication method enabled.' 
- } - else: - Setting().set('github_oauth_enabled', github_oauth_enabled) - Setting().set('github_oauth_key', - request.form.get('github_oauth_key')) - Setting().set('github_oauth_secret', - request.form.get('github_oauth_secret')) - Setting().set('github_oauth_scope', - request.form.get('github_oauth_scope')) - Setting().set('github_oauth_api_url', - request.form.get('github_oauth_api_url')) - Setting().set('github_oauth_token_url', - request.form.get('github_oauth_token_url')) - Setting().set('github_oauth_authorize_url', - request.form.get('github_oauth_authorize_url')) - result = { - 'status': True, - 'msg': - 'Saved successfully. Please reload PDA to take effect.' - } - elif conf_type == 'azure': - azure_oauth_enabled = True if request.form.get( - 'azure_oauth_enabled') else False - if not has_an_auth_method(azure_oauth_enabled=azure_oauth_enabled): - result = { - 'status': - False, - 'msg': - 'Must have at least one authentication method enabled.' - } - else: - Setting().set('azure_oauth_enabled', azure_oauth_enabled) - Setting().set('azure_oauth_key', - request.form.get('azure_oauth_key')) - Setting().set('azure_oauth_secret', - request.form.get('azure_oauth_secret')) - Setting().set('azure_oauth_scope', - request.form.get('azure_oauth_scope')) - Setting().set('azure_oauth_api_url', - request.form.get('azure_oauth_api_url')) - Setting().set('azure_oauth_token_url', - request.form.get('azure_oauth_token_url')) - Setting().set('azure_oauth_authorize_url', - request.form.get('azure_oauth_authorize_url')) - Setting().set( - 'azure_sg_enabled', True - if request.form.get('azure_sg_enabled') == 'ON' else False) - Setting().set('azure_admin_group', - request.form.get('azure_admin_group')) - Setting().set('azure_operator_group', - request.form.get('azure_operator_group')) - Setting().set('azure_user_group', - request.form.get('azure_user_group')) - Setting().set( - 'azure_group_accounts_enabled', True - if request.form.get('azure_group_accounts_enabled') == 'ON' 
else False) - Setting().set('azure_group_accounts_name', - request.form.get('azure_group_accounts_name')) - Setting().set('azure_group_accounts_name_re', - request.form.get('azure_group_accounts_name_re')) - Setting().set('azure_group_accounts_description', - request.form.get('azure_group_accounts_description')) - Setting().set('azure_group_accounts_description_re', - request.form.get('azure_group_accounts_description_re')) - result = { - 'status': True, - 'msg': - 'Saved successfully. Please reload PDA to take effect.' - } - elif conf_type == 'oidc': - oidc_oauth_enabled = True if request.form.get( - 'oidc_oauth_enabled') else False - if not has_an_auth_method(oidc_oauth_enabled=oidc_oauth_enabled): - result = { - 'status': - False, - 'msg': - 'Must have at least one authentication method enabled.' - } - else: - Setting().set( - 'oidc_oauth_enabled', - True if request.form.get('oidc_oauth_enabled') else False) - Setting().set('oidc_oauth_key', - request.form.get('oidc_oauth_key')) - Setting().set('oidc_oauth_secret', - request.form.get('oidc_oauth_secret')) - Setting().set('oidc_oauth_scope', - request.form.get('oidc_oauth_scope')) - Setting().set('oidc_oauth_api_url', - request.form.get('oidc_oauth_api_url')) - Setting().set('oidc_oauth_token_url', - request.form.get('oidc_oauth_token_url')) - Setting().set('oidc_oauth_authorize_url', - request.form.get('oidc_oauth_authorize_url')) - Setting().set('oidc_oauth_logout_url', - request.form.get('oidc_oauth_logout_url')) - Setting().set('oidc_oauth_username', - request.form.get('oidc_oauth_username')) - Setting().set('oidc_oauth_firstname', - request.form.get('oidc_oauth_firstname')) - Setting().set('oidc_oauth_last_name', - request.form.get('oidc_oauth_last_name')) - Setting().set('oidc_oauth_email', - request.form.get('oidc_oauth_email')) - Setting().set('oidc_oauth_account_name_property', - request.form.get('oidc_oauth_account_name_property')) - Setting().set('oidc_oauth_account_description_property', - 
request.form.get('oidc_oauth_account_description_property')) - result = { - 'status': True, - 'msg': - 'Saved successfully. Please reload PDA to take effect.' - } - else: - return abort(400) + result['data'] = Setting().get_group('authentication') - return render_template('admin_setting_authentication.html', - result=result) + return result @admin_bp.route('/templates', methods=['GET', 'POST']) @@ -1669,11 +1553,11 @@ def create_template(): t = DomainTemplate(name=name, description=description) result = t.create() if result['status'] == 'ok': - history = History(msg='Add domain template {0}'.format(name), - detail = json.dumps({ - 'name': name, - 'description': description - }), + history = History(msg='Add zone template {0}'.format(name), + detail=json.dumps({ + 'name': name, + 'description': description + }), created_by=current_user.username) history.add() return redirect(url_for('admin.templates')) @@ -1682,7 +1566,7 @@ def create_template(): return redirect(url_for('admin.create_template')) except Exception as e: current_app.logger.error( - 'Cannot create domain template. Error: {0}'.format(e)) + 'Cannot create zone template. 
Error: {0}'.format(e)) current_app.logger.debug(traceback.format_exc()) abort(500) @@ -1708,23 +1592,23 @@ def create_template_from_zone(): return make_response( jsonify({ 'status': - 'error', + 'error', 'msg': - 'A template with the name {0} already exists!'.format(name) + 'A template with the name {0} already exists!'.format(name) }), 409) t = DomainTemplate(name=name, description=description) result = t.create() if result['status'] == 'ok': - history = History(msg='Add domain template {0}'.format(name), - detail = json.dumps({ - 'name': name, - 'description': description - }), + history = History(msg='Add zone template {0}'.format(name), + detail=json.dumps({ + 'name': name, + 'description': description + }), created_by=current_user.username) history.add() - # After creating the domain in Domain Template in the, + # After creating the zone in Zone Template in the, # local DB. We add records into it Record Template. records = [] domain = Domain.query.filter(Domain.name == domain_name).first() @@ -1753,7 +1637,7 @@ def create_template_from_zone(): 'msg': result['msg'] }), 200) else: - # Revert the domain template (remove it) + # Revert the zone template (remove it) # ff we cannot add records. t.delete_template() return make_response( @@ -1810,7 +1694,7 @@ def edit_template(template): ttl_options=ttl_options) except Exception as e: current_app.logger.error( - 'Cannot open domain template page. DETAIL: {0}'.format(e)) + 'Cannot open zone template page. DETAIL: {0}'.format(e)) current_app.logger.debug(traceback.format_exc()) abort(500) return redirect(url_for('admin.templates')) @@ -1848,9 +1732,9 @@ def apply_records(template): jdata.pop('_csrf_token', None) # don't store csrf token in the history. 
history = History( - msg='Apply domain template record changes to domain template {0}' + msg='Apply zone template record changes to zone template {0}' .format(template), - detail = json.dumps(jdata), + detail=json.dumps(jdata), created_by=current_user.username) history.add() return make_response(jsonify(result), 200) @@ -1879,8 +1763,8 @@ def delete_template(template): result = t.delete_template() if result['status'] == 'ok': history = History( - msg='Deleted domain template {0}'.format(template), - detail = json.dumps({'name': template}), + msg='Deleted zone template {0}'.format(template), + detail=json.dumps({'name': template}), created_by=current_user.username) history.add() return redirect(url_for('admin.templates')) @@ -1897,7 +1781,6 @@ def delete_template(template): @admin_bp.route('/global-search', methods=['GET']) @login_required -@operator_role_required def global_search(): if request.method == 'GET': domains = [] @@ -1909,6 +1792,22 @@ def global_search(): server = Server(server_id='localhost') results = server.global_search(object_type='all', query=query) + # Filter results to domains to which the user has access permission + if current_user.role.name not in ['Administrator', 'Operator']: + allowed_domains = db.session.query(Domain) \ + .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ + .outerjoin(Account, Domain.account_id == Account.id) \ + .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ + .filter( + db.or_( + DomainUser.user_id == current_user.id, + AccountUser.user_id == current_user.id + )) \ + .with_entities(Domain.name) \ + .all() + allowed_domains = [value for value, in allowed_domains] + results = list(filter(lambda r: r['zone_id'][:-1] in allowed_domains, results)) + # Format the search result for result in results: if result['object_type'] == 'zone': @@ -1933,30 +1832,45 @@ def global_search(): else: pass - return render_template('admin_global_search.html', domains=domains, records=records, comments=comments) + 
params: dict = { + 'query': query if query is not None else '', + 'domains': domains, + 'records': records, + 'comments': comments, + } + + return render_template('admin_global_search.html', **params) + def validateURN(value): NID_PATTERN = re.compile(r'^[0-9a-z][0-9a-z-]{1,31}$', flags=re.IGNORECASE) NSS_PCHAR = '[a-z0-9-._~]|%[a-f0-9]{2}|[!$&\'()*+,;=]|:|@' NSS_PATTERN = re.compile(fr'^({NSS_PCHAR})({NSS_PCHAR}|/|\?)*$', re.IGNORECASE) - prefix=value.split(':') - if (len(prefix)<3): - current_app.logger.warning( "Too small urn prefix" ) + prefix = value.split(':') + if (len(prefix) < 3): + current_app.logger.warning("Too small urn prefix") return False - urn=prefix[0] - nid=prefix[1] - nss=value.replace(urn+":"+nid+":", "") + urn = prefix[0] + nid = prefix[1] + nss = value.replace(urn + ":" + nid + ":", "") - if not urn.lower()=="urn": - current_app.logger.warning( urn + ' contains invalid characters ' ) + if not urn.lower() == "urn": + current_app.logger.warning(urn + ' contains invalid characters ') return False if not re.match(NID_PATTERN, nid.lower()): - current_app.logger.warning( nid + ' contains invalid characters ' ) + current_app.logger.warning(nid + ' contains invalid characters ') return False if not re.match(NSS_PATTERN, nss): - current_app.logger.warning( nss + ' contains invalid characters ' ) + current_app.logger.warning(nss + ' contains invalid characters ') return False return True + + +def safe_cast(val, to_type, default=None): + try: + return to_type(val) + except (ValueError, TypeError): + return default diff --git a/powerdnsadmin/routes/api.py b/powerdnsadmin/routes/api.py index 68d235287..9f72b4e0d 100644 --- a/powerdnsadmin/routes/api.py +++ b/powerdnsadmin/routes/api.py @@ -1,21 +1,21 @@ import json -from urllib.parse import urljoin +import secrets +import string from base64 import b64encode -from flask import ( - Blueprint, g, request, abort, current_app, make_response, jsonify, -) +from urllib.parse import urljoin + +from flask import 
(Blueprint, g, request, abort, current_app, make_response, jsonify) from flask_login import current_user -from ..models.base import db -from ..models import ( - User, Domain, DomainUser, Account, AccountUser, History, Setting, ApiKey, - Role, +from .base import csrf +from ..decorators import ( + api_basic_auth, api_can_create_domain, is_json, apikey_auth, + apikey_can_create_domain, apikey_can_remove_domain, + apikey_is_admin, apikey_can_access_domain, apikey_can_configure_dnssec, + api_role_can, apikey_or_basic_auth, + callback_if_request_body_contains_key, allowed_record_types, allowed_record_ttl ) from ..lib import utils, helper -from ..lib.schema import ( - ApiKeySchema, DomainSchema, ApiPlainKeySchema, UserSchema, AccountSchema, - UserDetailedSchema, -) from ..lib.errors import ( StructuredException, DomainNotExists, DomainAlreadyExists, DomainAccessForbidden, @@ -23,19 +23,20 @@ AccountCreateFail, AccountUpdateFail, AccountDeleteFail, AccountCreateDuplicate, AccountNotExists, UserCreateFail, UserCreateDuplicate, UserUpdateFail, UserDeleteFail, - UserUpdateFailEmail, + UserUpdateFailEmail, InvalidAccountNameException ) -from ..decorators import ( - api_basic_auth, api_can_create_domain, is_json, apikey_auth, - apikey_can_create_domain, apikey_can_remove_domain, - apikey_is_admin, apikey_can_access_domain, apikey_can_configure_dnssec, - api_role_can, apikey_or_basic_auth, - callback_if_request_body_contains_key, +from ..lib.schema import ( + ApiKeySchema, DomainSchema, ApiPlainKeySchema, UserSchema, AccountSchema, + UserDetailedSchema, ) -import secrets -import string +from ..models import ( + User, Domain, DomainUser, Account, AccountUser, History, Setting, ApiKey, + Role, +) +from ..models.base import db api_bp = Blueprint('api', __name__, url_prefix='/api/v1') +apilist_bp = Blueprint('apilist', __name__, url_prefix='/') apikey_schema = ApiKeySchema(many=True) apikey_single_schema = ApiKeySchema() @@ -47,16 +48,23 @@ account_schema = AccountSchema(many=True) 
account_single_schema = AccountSchema() +def is_custom_header_api(): + custom_header_setting = Setting().get('custom_history_header') + if custom_header_setting != '' and custom_header_setting in request.headers: + return request.headers[custom_header_setting] + else: + return g.apikey.description + def get_user_domains(): domains = db.session.query(Domain) \ .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ .outerjoin(Account, Domain.account_id == Account.id) \ .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )).all() + db.or_( + DomainUser.user_id == current_user.id, + AccountUser.user_id == current_user.id + )).all() return domains @@ -68,10 +76,10 @@ def get_user_apikeys(domain_name=None): .outerjoin(Account, Domain.account_id == Account.id) \ .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ .filter( - db.or_( - DomainUser.user_id == User.id, - AccountUser.user_id == User.id - ) + db.or_( + DomainUser.user_id == User.id, + AccountUser.user_id == User.id + ) ) \ .filter(User.id == current_user.id) @@ -164,12 +172,7 @@ def handle_request_is_not_json(err): def before_request(): # Check site is in maintenance mode maintenance = Setting().get('maintenance') - if ( - maintenance and current_user.is_authenticated and - current_user.role.name not in [ - 'Administrator', 'Operator' - ] - ): + if (maintenance and current_user.is_authenticated and current_user.role.name not in ['Administrator', 'Operator']): return make_response( jsonify({ "status": False, @@ -177,9 +180,15 @@ def before_request(): })) +@apilist_bp.route('/api', methods=['GET']) +def index(): + return '[{"url": "/api/v1", "version": 1}]', 200 + + @api_bp.route('/pdnsadmin/zones', methods=['POST']) @api_basic_auth @api_can_create_domain +@csrf.exempt def api_login_create_zone(): pdns_api_url = Setting().get('pdns_api_url') pdns_api_key = 
Setting().get('pdns_api_key') @@ -202,7 +211,7 @@ def api_login_create_zone(): accept='application/json; q=1', verify=Setting().get('verify_ssl_connections')) except Exception as e: - current_app.logger.error("Cannot create domain. Error: {}".format(e)) + current_app.logger.error("Cannot create zone. Error: {}".format(e)) abort(500) if resp.status_code == 201: @@ -213,16 +222,15 @@ def api_login_create_zone(): domain.update() domain_id = domain.get_id_by_name(data['name'].rstrip('.')) - history = History(msg='Add domain {0}'.format( + history = History(msg='Add zone {0}'.format( data['name'].rstrip('.')), - detail=json.dumps(data), - created_by=current_user.username, - domain_id=domain_id) + detail=json.dumps(data), + created_by=current_user.username, + domain_id=domain_id) history.add() if current_user.role.name not in ['Administrator', 'Operator']: - current_app.logger.debug( - "User is ordinary user, assigning created domain") + current_app.logger.debug("User is ordinary user, assigning created zone") domain = Domain(name=data['name'].rstrip('.')) domain.update() domain.grant_privileges([current_user.id]) @@ -248,6 +256,7 @@ def api_login_list_zones(): @api_bp.route('/pdnsadmin/zones/', methods=['DELETE']) @api_basic_auth @api_can_create_domain +@csrf.exempt def api_login_delete_zone(domain_name): pdns_api_url = Setting().get('pdns_api_url') pdns_api_key = Setting().get('pdns_api_key') @@ -287,14 +296,13 @@ def api_login_delete_zone(domain_name): domain_id = domain.get_id_by_name(domain_name) domain.update() - history = History(msg='Delete domain {0}'.format( + history = History(msg='Delete zone {0}'.format( utils.pretty_domain_name(domain_name)), - detail='', - created_by=current_user.username, - domain_id=domain_id) + detail='', + created_by=current_user.username, + domain_id=domain_id) history.add() - except Exception as e: current_app.logger.error('Error: {0}'.format(e)) abort(500) @@ -304,6 +312,7 @@ def api_login_delete_zone(domain_name): 
@api_bp.route('/pdnsadmin/apikeys', methods=['POST']) @api_basic_auth +@csrf.exempt def api_generate_apikey(): data = request.get_json() description = None @@ -316,14 +325,14 @@ def api_generate_apikey(): if 'domains' not in data: domains = [] - elif not isinstance(data['domains'], (list, )): + elif not isinstance(data['domains'], (list,)): abort(400) else: domains = [d['name'] if isinstance(d, dict) else d for d in data['domains']] if 'accounts' not in data: accounts = [] - elif not isinstance(data['accounts'], (list, )): + elif not isinstance(data['accounts'], (list,)): abort(400) else: accounts = [a['name'] if isinstance(a, dict) else a for a in data['accounts']] @@ -338,13 +347,13 @@ def api_generate_apikey(): abort(400) if role_name == 'User' and len(domains) == 0 and len(accounts) == 0: - current_app.logger.error("Apikey with User role must have domains or accounts") + current_app.logger.error("Apikey with User role must have zones or accounts") raise ApiKeyNotUsable() if role_name == 'User' and len(domains) > 0: domain_obj_list = Domain.query.filter(Domain.name.in_(domains)).all() if len(domain_obj_list) == 0: - msg = "One of supplied domains does not exist" + msg = "One of supplied zones does not exist" current_app.logger.error(msg) raise DomainNotExists(message=msg) @@ -374,14 +383,13 @@ def api_generate_apikey(): domain_list = [item.name for item in domain_obj_list] user_domain_list = [item.name for item in user_domain_obj_list] - current_app.logger.debug("Input domain list: {0}".format(domain_list)) - current_app.logger.debug( - "User domain list: {0}".format(user_domain_list)) + current_app.logger.debug("Input zone list: {0}".format(domain_list)) + current_app.logger.debug("User zone list: {0}".format(user_domain_list)) inter = set(domain_list).intersection(set(user_domain_list)) if not (len(inter) == len(domain_list)): - msg = "You don't have access to one of domains" + msg = "You don't have access to one of zones" current_app.logger.error(msg) raise 
DomainAccessForbidden(message=msg) @@ -409,7 +417,7 @@ def api_get_apikeys(domain_name): if current_user.role.name not in ['Administrator', 'Operator']: if domain_name: - msg = "Check if domain {0} exists and is allowed for user.".format( + msg = "Check if zone {0} exists and is allowed for user.".format( domain_name) current_app.logger.debug(msg) apikeys = get_user_apikeys(domain_name) @@ -419,7 +427,7 @@ def api_get_apikeys(domain_name): current_app.logger.debug(apikey_schema.dump(apikeys)) else: - msg_str = "Getting all allowed domains for user {0}" + msg_str = "Getting all allowed zones for user {0}" msg = msg_str.format(current_user.username) current_app.logger.debug(msg) @@ -430,7 +438,7 @@ def api_get_apikeys(domain_name): current_app.logger.error('Error: {0}'.format(e)) abort(500) else: - current_app.logger.debug("Getting all domains for administrative user") + current_app.logger.debug("Getting all zones for administrative user") try: apikeys = ApiKey.query.all() current_app.logger.debug(apikey_schema.dump(apikeys)) @@ -460,6 +468,7 @@ def api_get_apikey(apikey_id): @api_bp.route('/pdnsadmin/apikeys/', methods=['DELETE']) @api_basic_auth +@csrf.exempt def api_delete_apikey(apikey_id): apikey = ApiKey.query.get(apikey_id) @@ -479,7 +488,7 @@ def api_delete_apikey(apikey_id): inter = set(apikey_domains_list).intersection(set(user_domains_list)) if not (len(inter) == len(apikey_domains_list)): - msg = "You don't have access to some domains apikey belongs to" + msg = "You don't have access to some zones apikey belongs to" current_app.logger.error(msg) raise DomainAccessForbidden(message=msg) @@ -497,6 +506,7 @@ def api_delete_apikey(apikey_id): @api_bp.route('/pdnsadmin/apikeys/', methods=['PUT']) @api_basic_auth +@csrf.exempt def api_update_apikey(apikey_id): # if role different and user is allowed to change it, update # if apikey domains are different and user is allowed to handle @@ -527,14 +537,14 @@ def api_update_apikey(apikey_id): if 'domains' not in 
data: domains = None - elif not isinstance(data['domains'], (list, )): + elif not isinstance(data['domains'], (list,)): abort(400) else: domains = [d['name'] if isinstance(d, dict) else d for d in data['domains']] if 'accounts' not in data: accounts = None - elif not isinstance(data['accounts'], (list, )): + elif not isinstance(data['accounts'], (list,)): abort(400) else: accounts = [a['name'] if isinstance(a, dict) else a for a in data['accounts']] @@ -548,7 +558,7 @@ def api_update_apikey(apikey_id): if domains is not None: domain_obj_list = Domain.query.filter(Domain.name.in_(domains)).all() if len(domain_obj_list) != len(domains): - msg = "One of supplied domains does not exist" + msg = "One of supplied zones does not exist" current_app.logger.error(msg) raise DomainNotExists(message=msg) @@ -568,12 +578,12 @@ def api_update_apikey(apikey_id): target_accounts = current_accounts if len(target_domains) == 0 and len(target_accounts) == 0: - current_app.logger.error("Apikey with User role must have domains or accounts") + current_app.logger.error("Apikey with User role must have zones or accounts") raise ApiKeyNotUsable() if domains is not None and set(domains) == set(current_domains): current_app.logger.debug( - "Domains are the same, apikey domains won't be updated") + "Zones are the same, apikey zones won't be updated") domains = None if accounts is not None and set(accounts) == set(current_accounts): @@ -600,19 +610,19 @@ def api_update_apikey(apikey_id): domain_list = [item.name for item in domain_obj_list] user_domain_list = [item.name for item in user_domain_obj_list] - current_app.logger.debug("Input domain list: {0}".format(domain_list)) + current_app.logger.debug("Input zone list: {0}".format(domain_list)) current_app.logger.debug( - "User domain list: {0}".format(user_domain_list)) + "User zone list: {0}".format(user_domain_list)) inter = set(domain_list).intersection(set(user_domain_list)) if not (len(inter) == len(domain_list)): - msg = "You don't have 
access to one of domains" + msg = "You don't have access to one of zones" current_app.logger.error(msg) raise DomainAccessForbidden(message=msg) if apikey_id not in apikeys_ids: - msg = 'Apikey does not belong to domain to which user has access' + msg = 'Apikey does not belong to zone to which user has access' current_app.logger.error(msg) raise DomainAccessForbidden() @@ -658,6 +668,7 @@ def api_list_users(username=None): @api_bp.route('/pdnsadmin/users', methods=['POST']) @api_basic_auth @api_role_can('create users', allow_self=True) +@csrf.exempt def api_create_user(): """ Create new user @@ -731,6 +742,7 @@ def api_create_user(): @api_bp.route('/pdnsadmin/users/', methods=['PUT']) @api_basic_auth @api_role_can('update users', allow_self=True) +@csrf.exempt def api_update_user(user_id): """ Update existing user @@ -803,6 +815,7 @@ def api_update_user(user_id): @api_bp.route('/pdnsadmin/users/', methods=['DELETE']) @api_basic_auth @api_role_can('delete users') +@csrf.exempt def api_delete_user(user_id): user = User.query.get(user_id) if not user: @@ -854,6 +867,7 @@ def api_list_accounts(account_name): @api_bp.route('/pdnsadmin/accounts', methods=['POST']) @api_basic_auth +@csrf.exempt def api_create_account(): if current_user.role.name not in ['Administrator', 'Operator']: msg = "{} role cannot create accounts".format(current_user.role.name) @@ -864,12 +878,15 @@ def api_create_account(): contact = data['contact'] if 'contact' in data else None mail = data['mail'] if 'mail' in data else None if not name: - current_app.logger.debug("Account name missing") - abort(400) + current_app.logger.debug("Account creation failed: name missing") + raise InvalidAccountNameException(message="Account name missing") + + sanitized_name = Account.sanitize_name(name) + account_exists = Account.query.filter(Account.name == sanitized_name).all() - account_exists = [] or Account.query.filter(Account.name == name).all() if len(account_exists) > 0: - msg = "Account {} already 
exists".format(name) + msg = ("Requested Account {} would be translated to {}" + " which already exists").format(name, sanitized_name) current_app.logger.debug(msg) raise AccountCreateDuplicate(message=msg) @@ -895,6 +912,7 @@ def api_create_account(): @api_bp.route('/pdnsadmin/accounts/', methods=['PUT']) @api_basic_auth @api_role_can('update accounts') +@csrf.exempt def api_update_account(account_id): data = request.get_json() name = data['name'] if 'name' in data else None @@ -907,8 +925,9 @@ def api_update_account(account_id): if not account: abort(404) - if name and name != account.name: - abort(400) + if name and Account.sanitize_name(name) != account.name: + msg = "Account name is immutable" + raise AccountUpdateFail(message=msg) if current_user.role.name not in ['Administrator', 'Operator']: msg = "User role update accounts" @@ -935,22 +954,21 @@ def api_update_account(account_id): @api_bp.route('/pdnsadmin/accounts/', methods=['DELETE']) @api_basic_auth @api_role_can('delete accounts') +@csrf.exempt def api_delete_account(account_id): account_list = [] or Account.query.filter(Account.id == account_id).all() if len(account_list) == 1: account = account_list[0] else: abort(404) - current_app.logger.debug( - f'Deleting Account {account.name}' - ) + current_app.logger.debug(f'Deleting Account {account.name}') # Remove account association from domains first if len(account.domains) > 0: for domain in account.domains: - current_app.logger.info(f"Disassociating domain {domain.name} with {account.name}") + current_app.logger.info(f"Disassociating zone {domain.name} with {account.name}") Domain(name=domain.name).assoc_account(None, update=False) - current_app.logger.info("Syncing all domains") + current_app.logger.info("Syncing all zones") Domain().update() current_app.logger.debug( @@ -986,6 +1004,7 @@ def api_list_account_users(account_id): methods=['PUT']) @api_basic_auth @api_role_can('add user to account') +@csrf.exempt def api_add_account_user(account_id, 
user_id): account = Account.query.get(account_id) if not account: @@ -1013,6 +1032,7 @@ def api_add_account_user(account_id, user_id): methods=['DELETE']) @api_basic_auth @api_role_can('remove user from account') +@csrf.exempt def api_remove_account_user(account_id, user_id): account = Account.query.get(account_id) if not account: @@ -1023,7 +1043,7 @@ def api_remove_account_user(account_id, user_id): user_list = User.query.join(AccountUser).filter( AccountUser.account_id == account_id, AccountUser.user_id == user_id, - ).all() + ).all() if not user_list: abort(404) if not account.remove_user(user): @@ -1044,6 +1064,7 @@ def api_remove_account_user(account_id, user_id): @apikey_auth @apikey_can_access_domain @apikey_can_configure_dnssec(http_methods=['POST']) +@csrf.exempt def api_zone_cryptokeys(server_id, zone_id): resp = helper.forward_request() return resp.content, resp.status_code, resp.headers.items() @@ -1055,6 +1076,7 @@ def api_zone_cryptokeys(server_id, zone_id): @apikey_auth @apikey_can_access_domain @apikey_can_configure_dnssec() +@csrf.exempt def api_zone_cryptokey(server_id, zone_id, cryptokey_id): resp = helper.forward_request() return resp.content, resp.status_code, resp.headers.items() @@ -1065,6 +1087,7 @@ def api_zone_cryptokey(server_id, zone_id, cryptokey_id): methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE']) @apikey_auth @apikey_can_access_domain +@csrf.exempt def api_zone_subpath_forward(server_id, zone_id, subpath): resp = helper.forward_request() return resp.content, resp.status_code, resp.headers.items() @@ -1073,40 +1096,46 @@ def api_zone_subpath_forward(server_id, zone_id, subpath): @api_bp.route('/servers//zones/', methods=['GET', 'PUT', 'PATCH', 'DELETE']) @apikey_auth +@allowed_record_types +@allowed_record_ttl @apikey_can_access_domain @apikey_can_remove_domain(http_methods=['DELETE']) @callback_if_request_body_contains_key(apikey_can_configure_dnssec()(), http_methods=['PUT'], keys=['dnssec', 'nsec3param']) +@csrf.exempt def 
api_zone_forward(server_id, zone_id): resp = helper.forward_request() if not Setting().get('bg_domain_updates'): domain = Domain() domain.update() status = resp.status_code + created_by_value=is_custom_header_api() if 200 <= status < 300: current_app.logger.debug("Request to powerdns API successful") if Setting().get('enable_api_rr_history'): - if request.method in ['POST', 'PATCH'] : + if request.method in ['POST', 'PATCH']: data = request.get_json(force=True) - for rrset_data in data['rrsets']: - history = History(msg='{0} zone {1} record of {2}'.format( - rrset_data['changetype'].lower(), rrset_data['type'], - rrset_data['name'].rstrip('.')), - detail=json.dumps(data), - created_by=g.apikey.description, - domain_id=Domain().get_id_by_name(zone_id.rstrip('.'))) - history.add() + history = History( + msg='Apply record changes to zone {0}'.format(zone_id.rstrip('.')), + detail = json.dumps({ + 'domain': zone_id.rstrip('.'), + 'add_rrsets': list(filter(lambda r: r['changetype'] == "REPLACE", data['rrsets'])), + 'del_rrsets': list(filter(lambda r: r['changetype'] == "DELETE", data['rrsets'])) + }), + created_by=created_by_value, + domain_id=Domain().get_id_by_name(zone_id.rstrip('.'))) + history.add() elif request.method == 'DELETE': history = History(msg='Deleted zone {0}'.format(zone_id.rstrip('.')), detail='', - created_by=g.apikey.description, + created_by=created_by_value, domain_id=Domain().get_id_by_name(zone_id.rstrip('.'))) history.add() elif request.method != 'GET': history = History(msg='Updated zone {0}'.format(zone_id.rstrip('.')), detail='', - created_by=g.apikey.description, + created_by=created_by_value, domain_id=Domain().get_id_by_name(zone_id.rstrip('.'))) history.add() return resp.content, resp.status_code, resp.headers.items() @@ -1115,6 +1144,7 @@ def api_zone_forward(server_id, zone_id): @api_bp.route('/servers/', methods=['GET', 'PUT']) @apikey_auth @apikey_is_admin +@csrf.exempt def api_server_sub_forward(subpath): resp = 
helper.forward_request() return resp.content, resp.status_code, resp.headers.items() @@ -1123,26 +1153,28 @@ def api_server_sub_forward(subpath): @api_bp.route('/servers//zones', methods=['POST']) @apikey_auth @apikey_can_create_domain +@csrf.exempt def api_create_zone(server_id): resp = helper.forward_request() if resp.status_code == 201: current_app.logger.debug("Request to powerdns API successful") + created_by_value=is_custom_header_api() data = request.get_json(force=True) if g.apikey.role.name not in ['Administrator', 'Operator']: current_app.logger.debug( - "Apikey is user key, assigning created domain") + "Apikey is user key, assigning created zone") domain = Domain(name=data['name'].rstrip('.')) g.apikey.domains.append(domain) domain = Domain() domain.update() - history = History(msg='Add domain {0}'.format( + history = History(msg='Add zone {0}'.format( data['name'].rstrip('.')), detail=json.dumps(data), - created_by=g.apikey.description, + created_by=created_by_value, domain_id=domain.get_id_by_name(data['name'].rstrip('.'))) history.add() @@ -1160,15 +1192,13 @@ def api_get_zones(server_id): return jsonify(domain_schema.dump(domain_obj_list)), 200 else: resp = helper.forward_request() - if (g.apikey.role.name not in ['Administrator', 'Operator'] - and resp.status_code == 200): + if (g.apikey.role.name not in ['Administrator', 'Operator'] and resp.status_code == 200): domain_list = [d['name'] for d in domain_schema.dump(g.apikey.domains)] accounts_domains = [d.name for a in g.apikey.accounts for d in a.domains] allowed_domains = set(domain_list + accounts_domains) - current_app.logger.debug("Account domains: {}".format( - '/'.join(accounts_domains))) + current_app.logger.debug("Account zones: {}".format('/'.join(accounts_domains))) content = json.dumps([i for i in json.loads(resp.content) if i['name'].rstrip('.') in allowed_domains]) return content, resp.status_code, resp.headers.items() @@ -1182,12 +1212,14 @@ def api_server_forward(): resp = 
helper.forward_request() return resp.content, resp.status_code, resp.headers.items() + @api_bp.route('/servers/', methods=['GET']) @apikey_auth def api_server_config_forward(server_id): resp = helper.forward_request() return resp.content, resp.status_code, resp.headers.items() + # The endpoint to synchronize Domains in background @api_bp.route('/sync_domains', methods=['GET']) @apikey_or_basic_auth @@ -1195,3 +1227,23 @@ def sync_domains(): domain = Domain() domain.update() return 'Finished synchronization in background', 200 + + +@api_bp.route('/health', methods=['GET']) +@apikey_auth +def health(): + domain = Domain() + domain_to_query = domain.query.first() + + if not domain_to_query: + current_app.logger.error("No zone found to query a health check") + return make_response("Unknown", 503) + + try: + domain.get_domain_info(domain_to_query.name) + except Exception as e: + current_app.logger.error( + "Health Check - Failed to query authoritative server for zone {}".format(domain_to_query.name)) + return make_response("Down", 503) + + return make_response("Up", 200) diff --git a/powerdnsadmin/routes/base.py b/powerdnsadmin/routes/base.py index 48ef1c03b..f805c9070 100644 --- a/powerdnsadmin/routes/base.py +++ b/powerdnsadmin/routes/base.py @@ -1,9 +1,15 @@ import base64 + from flask import render_template, url_for, redirect, session, request, current_app from flask_login import LoginManager +from flask_seasurf import SeaSurf +from flask_session_captcha import FlaskSessionCaptcha from ..models.user import User + +captcha = FlaskSessionCaptcha() +csrf = SeaSurf() login_manager = LoginManager() @@ -54,15 +60,31 @@ def login_via_authorization_header_or_remote_user(request): # Try to login using Basic Authentication auth_header = request.headers.get('Authorization') if auth_header: + + if auth_header[:6] != "Basic ": + return None + auth_method = request.args.get('auth_method', 'LOCAL') auth_method = 'LDAP' if auth_method != 'LOCAL' else 'LOCAL' - auth_header = 
auth_header.replace('Basic ', '', 1) + + # Remove "Basic " from the header value + auth_header = auth_header[6:] + try: auth_header = str(base64.b64decode(auth_header), 'utf-8') - username, password = auth_header.split(":") - except TypeError as e: + except (UnicodeDecodeError, TypeError) as e: return None + # NK: We use auth_components here as we don't know if we'll have a :, we split it maximum 1 times to grab the + # username, the rest of the string would be the password. + auth_components = auth_header.split(':', maxsplit=1) + + # If we don't have two auth components (username, password), we can return + if len(auth_components) != 2: + return None + + (username, password) = auth_components + user = User(username=username, password=password, plain_text_password=password) diff --git a/powerdnsadmin/routes/dashboard.py b/powerdnsadmin/routes/dashboard.py index 8cf1b1293..e517207d8 100644 --- a/powerdnsadmin/routes/dashboard.py +++ b/powerdnsadmin/routes/dashboard.py @@ -1,10 +1,10 @@ import datetime -from flask import Blueprint, render_template, url_for, current_app, request, jsonify, redirect, g, session +from collections import namedtuple +from flask import Blueprint, render_template, url_for, current_app, request, jsonify, redirect, g, session, abort from flask_login import login_required, current_user, login_manager from sqlalchemy import not_ from ..decorators import operator_role_required -from ..lib.utils import customBoxes from ..models.user import User, Anonymous from ..models.account import Account from ..models.account_user import AccountUser @@ -21,6 +21,31 @@ url_prefix='/dashboard') +class ZoneTabs: + """Config data for the zone tabs on the dashboard.""" + + TabInfo = namedtuple('TabInfo', ['display_name', 'filter_pattern']) + """Info about a single tab. + + `display_name` is the name on the tab. + `filter_pattern` is a SQL LIKE pattern , which is case-insensitively matched against the zone + name (without the final root-dot). 
+ + If a filter is present, the tab will show zones that match the filter. + If no filter is present, the tab will show zones that are not matched by any other tab filter. + """ + + tabs = { + 'forward': TabInfo("", None), + 'reverse_ipv4': TabInfo("in-addr.arpa", '%.in-addr.arpa'), + 'reverse_ipv6': TabInfo("ip6.arpa", '%.ip6.arpa'), + } + """Dict of unique tab id to a TabInfo.""" + + order = ['forward', 'reverse_ipv4', 'reverse_ipv6'] + """List of tab ids in the order they will appear.""" + + @dashboard_bp.before_request def before_request(): # Check if user is anonymous @@ -30,7 +55,7 @@ def before_request(): # Check site is in maintenance mode maintenance = Setting().get('maintenance') if maintenance and current_user.is_authenticated and current_user.role.name not in [ - 'Administrator', 'Operator' + 'Administrator', 'Operator' ]: return render_template('maintenance.html') @@ -41,9 +66,12 @@ def before_request(): session.modified = True -@dashboard_bp.route('/domains-custom/', methods=['GET']) +@dashboard_bp.route('/domains-custom/', methods=['GET']) @login_required -def domains_custom(boxId): +def domains_custom(tab_id): + if tab_id not in ZoneTabs.tabs: + abort(404) + if current_user.role.name in ['Administrator', 'Operator']: domains = Domain.query else: @@ -55,13 +83,14 @@ def domains_custom(boxId): .outerjoin(Account, Domain.account_id == Account.id) \ .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )) + db.or_( + DomainUser.user_id == current_user.id, + AccountUser.user_id == current_user.id + )) template = current_app.jinja_env.get_template("dashboard_domain.html") - render = template.make_module(vars={"current_user": current_user, "allow_user_view_history": Setting().get('allow_user_view_history')}) + render = template.make_module( + vars={"current_user": current_user, "allow_user_view_history": 
Setting().get('allow_user_view_history')}) columns = [ Domain.name, Domain.dnssec, Domain.type, Domain.serial, Domain.master, @@ -83,14 +112,15 @@ def domains_custom(boxId): if order_by: domains = domains.order_by(*order_by) - if boxId == "reverse": - for boxId in customBoxes.order: - if boxId == "reverse": continue - domains = domains.filter( - not_(Domain.name.ilike(customBoxes.boxes[boxId][1]))) + if ZoneTabs.tabs[tab_id].filter_pattern: + # If the tab has a filter, use only that + domains = domains.filter(Domain.name.ilike(ZoneTabs.tabs[tab_id].filter_pattern)) else: - domains = domains.filter(Domain.name.ilike( - customBoxes.boxes[boxId][1])) + # If the tab has no filter, use all the other filters in negated form + for tab_info in ZoneTabs.tabs.values(): + if not tab_info.filter_pattern: + continue + domains = domains.filter(not_(Domain.name.ilike(tab_info.filter_pattern))) total_count = domains.count() @@ -111,7 +141,7 @@ def domains_custom(boxId): filtered_count = domains.count() start = int(request.args.get("start", 0)) - length = min(int(request.args.get("length", 0)), 100) + length = min(int(request.args.get("length", 0)), max(100, int(Setting().get('default_domain_table_size')))) if length != -1: domains = domains[start:start + length] @@ -146,68 +176,18 @@ def dashboard(): BG_DOMAIN_UPDATE = Setting().get('bg_domain_updates') if not BG_DOMAIN_UPDATE: - current_app.logger.info('Updating domains in foreground...') + current_app.logger.info('Updating zones in foreground...') Domain().update() else: - current_app.logger.info('Updating domains in background...') + current_app.logger.info('Updating zones in background...') show_bg_domain_button = BG_DOMAIN_UPDATE if BG_DOMAIN_UPDATE and current_user.role.name not in ['Administrator', 'Operator']: show_bg_domain_button = False - # Stats for dashboard - domain_count = 0 - history_number = 0 - history = [] - user_num = User.query.count() - if current_user.role.name in ['Administrator', 'Operator']: - 
domain_count = Domain.query.count() - history_number = History.query.count() - history = History.query.order_by(History.created_on.desc()).limit(4).all() - elif Setting().get('allow_user_view_history'): - history = db.session.query(History) \ - .join(Domain, History.domain_id == Domain.id) \ - .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ - .outerjoin(Account, Domain.account_id == Account.id) \ - .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ - .order_by(History.created_on.desc()) \ - .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )).all() - history_number = len(history) # history.count() - history = history[:4] - domain_count = db.session.query(Domain) \ - .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ - .outerjoin(Account, Domain.account_id == Account.id) \ - .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ - .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )).count() - - from .admin import convert_histories, DetailedHistory - detailedHistories = convert_histories(history) - - server = Server(server_id='localhost') - statistics = server.get_statistic() - if statistics: - uptime = list([ - uptime for uptime in statistics if uptime['name'] == 'uptime' - ])[0]['value'] - else: - uptime = 0 - # Add custom boxes to render_template return render_template('dashboard.html', - custom_boxes=customBoxes, - domain_count=domain_count, - user_num=user_num, - history_number=history_number, - uptime=uptime, - histories=detailedHistories, + zone_tabs=ZoneTabs, show_bg_domain_button=show_bg_domain_button, pdns_version=Setting().get('pdns_version')) @@ -216,7 +196,7 @@ def dashboard(): @login_required @operator_role_required def domains_updater(): - current_app.logger.debug('Update domains in background') + current_app.logger.debug('Update zones in background') d = Domain().update() response_data = { diff --git 
a/powerdnsadmin/routes/domain.py b/powerdnsadmin/routes/domain.py index fe9e98c2b..6cd9d38b5 100644 --- a/powerdnsadmin/routes/domain.py +++ b/powerdnsadmin/routes/domain.py @@ -10,6 +10,7 @@ from ..lib.utils import pretty_domain_name from ..lib.utils import pretty_json +from ..lib.utils import to_idna from ..decorators import can_create_domain, operator_role_required, can_access_domain, can_configure_dnssec, can_remove_domain from ..models.user import User, Anonymous from ..models.account import Account @@ -24,14 +25,13 @@ from ..models.base import db from ..models.domain_user import DomainUser from ..models.account_user import AccountUser -from .admin import extract_changelogs_from_a_history_entry +from .admin import extract_changelogs_from_history from ..decorators import history_access_required domain_bp = Blueprint('domain', __name__, template_folder='templates', url_prefix='/domain') - @domain_bp.before_request def before_request(): # Check if user is anonymous @@ -63,18 +63,18 @@ def domain(domain_name): # Query domain's rrsets from PowerDNS API rrsets = Record().get_rrsets(domain.name) - current_app.logger.debug("Fetched rrests: \n{}".format(pretty_json(rrsets))) + current_app.logger.debug("Fetched rrsets: \n{}".format(pretty_json(rrsets))) # API server might be down, misconfigured - if not rrsets and domain.type != 'Slave': + if not rrsets and str(domain.type).lower() != 'slave': abort(500) quick_edit = Setting().get('record_quick_edit') records_allow_to_edit = Setting().get_records_allow_to_edit() forward_records_allow_to_edit = Setting( - ).get_forward_records_allow_to_edit() + ).get_supported_record_types(Setting().ZONE_TYPE_FORWARD) reverse_records_allow_to_edit = Setting( - ).get_reverse_records_allow_to_edit() + ).get_supported_record_types(Setting().ZONE_TYPE_REVERSE) ttl_options = Setting().get_ttl_options() records = [] @@ -89,14 +89,14 @@ def domain(domain_name): # - Find a way to make it consistent, or # - Only allow one comment for that case if 
StrictVersion(Setting().get('pdns_version')) >= StrictVersion('4.0.0'): + pretty_v6 = Setting().get('pretty_ipv6_ptr') for r in rrsets: if r['type'] in records_allow_to_edit: r_name = r['name'].rstrip('.') # If it is reverse zone and pretty_ipv6_ptr setting # is enabled, we reformat the name for ipv6 records. - if Setting().get('pretty_ipv6_ptr') and r[ - 'type'] == 'PTR' and 'ip6.arpa' in r_name and '*' not in r_name: + if pretty_v6 and r['type'] == 'PTR' and 'ip6.arpa' in r_name and '*' not in r_name: r_name = dns.reversename.to_address( dns.name.from_text(r_name)) @@ -133,7 +133,8 @@ def domain(domain_name): editable_records=editable_records, quick_edit=quick_edit, ttl_options=ttl_options, - current_user=current_user) + current_user=current_user, + allow_user_view_history=Setting().get('allow_user_view_history')) @domain_bp.route('/remove', methods=['GET', 'POST']) @@ -177,7 +178,7 @@ def remove(): if result['status'] == 'error': abort(500) - history = History(msg='Delete domain {0}'.format( + history = History(msg='Delete zone {0}'.format( pretty_domain_name(domain_name)), created_by=current_user.username) history.add() @@ -200,17 +201,6 @@ def changelog(domain_name): if not domain: abort(404) - # Query domain's rrsets from PowerDNS API - rrsets = Record().get_rrsets(domain.name) - current_app.logger.debug("Fetched rrests: \n{}".format(pretty_json(rrsets))) - - # API server might be down, misconfigured - if not rrsets and domain.type != 'Slave': - abort(500) - - records_allow_to_edit = Setting().get_records_allow_to_edit() - records = [] - # get all changelogs for this domain, in descening order if current_user.role.name in [ 'Administrator', 'Operator' ]: histories = History.query.filter(History.domain_id == domain.id).order_by(History.created_on.desc()).all() @@ -229,55 +219,19 @@ def changelog(domain_name): DomainUser.user_id == current_user.id, AccountUser.user_id == current_user.id ), - History.domain_id == domain.id + History.domain_id == domain.id, + 
History.detail.isnot(None) ) ).all() - if StrictVersion(Setting().get('pdns_version')) >= StrictVersion('4.0.0'): - for r in rrsets: - if r['type'] in records_allow_to_edit: - r_name = r['name'].rstrip('.') - - # If it is reverse zone and pretty_ipv6_ptr setting - # is enabled, we reformat the name for ipv6 records. - if Setting().get('pretty_ipv6_ptr') and r[ - 'type'] == 'PTR' and 'ip6.arpa' in r_name and '*' not in r_name: - r_name = dns.reversename.to_address( - dns.name.from_text(r_name)) - - # Create the list of records in format that - # PDA jinja2 template can understand. - index = 0 - for record in r['records']: - if (len(r['comments'])>index): - c=r['comments'][index]['content'] - else: - c='' - record_entry = RecordEntry( - name=r_name, - type=r['type'], - status='Disabled' if record['disabled'] else 'Active', - ttl=r['ttl'], - data=record['content'], - comment=c, - is_allowed_edit=True) - index += 1 - records.append(record_entry) - else: - # Unsupported version - abort(500) + changes_set = extract_changelogs_from_history(histories) - changes_set = dict() - for i in range(len(histories)): - extract_changelogs_from_a_history_entry(changes_set, histories[i], i) - if i in changes_set and len(changes_set[i]) == 0: # if empty, then remove the key - changes_set.pop(i) return render_template('domain_changelog.html', domain=domain, allHistoryChanges=changes_set) """ Returns a changelog for a specific pair of (record_name, record_type) """ -@domain_bp.route('//changelog/-', methods=['GET']) +@domain_bp.route('//changelog//', methods=['GET']) @login_required @can_access_domain @history_access_required @@ -288,17 +242,18 @@ def record_changelog(domain_name, record_name, record_type): domain = Domain.query.filter(Domain.name == domain_name).first() if not domain: abort(404) - # Query domain's rrsets from PowerDNS API - rrsets = Record().get_rrsets(domain.name) - current_app.logger.debug("Fetched rrests: \n{}".format(pretty_json(rrsets))) - - # API server might be 
down, misconfigured - if not rrsets and domain.type != 'Slave': - abort(500) # get all changelogs for this domain, in descening order if current_user.role.name in [ 'Administrator', 'Operator' ]: - histories = History.query.filter(History.domain_id == domain.id).order_by(History.created_on.desc()).all() + histories = History.query \ + .filter( + db.and_( + History.domain_id == domain.id, + History.detail.like("%{}%".format(record_name)) + ) + ) \ + .order_by(History.created_on.desc()) \ + .all() else: # if the user isn't an administrator or operator, # allow_user_view_history must be enabled to get here, @@ -308,42 +263,22 @@ def record_changelog(domain_name, record_name, record_type): .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \ .outerjoin(Account, Domain.account_id == Account.id) \ .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ - .order_by(History.created_on.desc()) \ .filter( db.and_(db.or_( DomainUser.user_id == current_user.id, AccountUser.user_id == current_user.id ), - History.domain_id == domain.id + History.domain_id == domain.id, + History.detail.like("%{}%".format(record_name)) ) - ).all() - - changes_set_of_record = dict() - for i in range(len(histories)): - extract_changelogs_from_a_history_entry(changes_set_of_record, histories[i], i, record_name, record_type) - if i in changes_set_of_record and len(changes_set_of_record[i]) == 0: # if empty, then remove the key - changes_set_of_record.pop(i) - - indexes_to_pop = [] - for change_num in changes_set_of_record: - changes_i = changes_set_of_record[change_num] - for hre in changes_i: # for each history record entry in changes_i - if 'type' in hre.add_rrest and hre.add_rrest['name'] == record_name and hre.add_rrest['type'] == record_type: - continue - elif 'type' in hre.del_rrest and hre.del_rrest['name'] == record_name and hre.del_rrest['type'] == record_type: - continue - else: - changes_set_of_record[change_num].remove(hre) - if change_num in changes_set_of_record and 
len(changes_set_of_record[change_num]) == 0: # if empty, then remove the key - indexes_to_pop.append(change_num) - - for i in indexes_to_pop: - changes_set_of_record.pop(i) - - return render_template('domain_changelog.html', domain=domain, allHistoryChanges=changes_set_of_record, - record_name = record_name, record_type = record_type) + ) \ + .order_by(History.created_on.desc()) \ + .all() + changes_set = extract_changelogs_from_history(histories, record_name, record_type) + return render_template('domain_changelog.html', domain=domain, allHistoryChanges=changes_set, + record_name = record_name, record_type = record_type) @domain_bp.route('/add', methods=['GET', 'POST']) @login_required @@ -361,7 +296,10 @@ def add(): if ' ' in domain_name or not domain_name or not domain_type: return render_template( 'errors/400.html', - msg="Please enter a valid domain name"), 400 + msg="Please enter a valid zone name"), 400 + + if domain_name.endswith('.'): + domain_name = domain_name[:-1] # If User creates the domain, check some additional stuff if current_user.role.name not in ['Administrator', 'Operator']: @@ -379,13 +317,13 @@ def add(): # Encode domain name into punycode (IDN) try: - domain_name = domain_name.encode('idna').decode() + domain_name = to_idna(domain_name, 'encode') except: - current_app.logger.error("Cannot encode the domain name {}".format(domain_name)) + current_app.logger.error("Cannot encode the zone name {}".format(domain_name)) current_app.logger.debug(traceback.format_exc()) return render_template( 'errors/400.html', - msg="Please enter a valid domain name"), 400 + msg="Please enter a valid zone name"), 400 if domain_type == 'slave': if request.form.getlist('domain_master_address'): @@ -400,6 +338,38 @@ def add(): account_name = Account().get_name_by_id(account_id) d = Domain() + + ### Test if a record same as the domain already exists in an upper level domain + if Setting().get('deny_domain_override'): + + upper_domain = None + domain_override = False 
+ domain_override_toggle = False + + if current_user.role.name in ['Administrator', 'Operator']: + domain_override = request.form.get('domain_override') + domain_override_toggle = True + + + # If overriding box is not selected. + # False = Do not allow ovrriding, perform checks + # True = Allow overriding, do not perform checks + if not domain_override: + upper_domain = d.is_overriding(domain_name) + + if upper_domain: + if current_user.role.name in ['Administrator', 'Operator']: + accounts = Account.query.order_by(Account.name).all() + else: + accounts = current_user.get_accounts() + + msg = 'Zone already exists as a record under zone: {}'.format(upper_domain) + + return render_template('domain_add.html', + domain_override_message=msg, + accounts=accounts, + domain_override_toggle=domain_override_toggle) + result = d.add(domain_name=domain_name, domain_type=domain_type, soa_edit_api=soa_edit_api, @@ -407,7 +377,7 @@ def add(): account_name=account_name) if result['status'] == 'ok': domain_id = Domain().get_id_by_name(domain_name) - history = History(msg='Add domain {0}'.format( + history = History(msg='Add zone {0}'.format( pretty_domain_name(domain_name)), detail = json.dumps({ 'domain_type': domain_type, @@ -450,9 +420,9 @@ def add(): domain_name, 'template': template.name, - 'add_rrests': + 'add_rrsets': result['data'][0]['rrsets'], - 'del_rrests': + 'del_rrsets': result['data'][1]['rrsets'] }), created_by=current_user.username, @@ -471,20 +441,23 @@ def add(): return render_template('errors/400.html', msg=result['msg']), 400 except Exception as e: - current_app.logger.error('Cannot add domain. Error: {0}'.format(e)) + current_app.logger.error('Cannot add zone. 
Error: {0}'.format(e)) current_app.logger.debug(traceback.format_exc()) abort(500) # Get else: + domain_override_toggle = False # Admins and Operators can set to any account if current_user.role.name in ['Administrator', 'Operator']: accounts = Account.query.order_by(Account.name).all() + domain_override_toggle = True else: accounts = current_user.get_accounts() return render_template('domain_add.html', templates=templates, - accounts=accounts) + accounts=accounts, + domain_override_toggle=domain_override_toggle) @@ -498,7 +471,7 @@ def delete(domain_name): if result['status'] == 'error': abort(500) - history = History(msg='Delete domain {0}'.format( + history = History(msg='Delete zone {0}'.format( pretty_domain_name(domain_name)), created_by=current_user.username) history.add() @@ -521,13 +494,17 @@ def setting(domain_name): d = Domain(name=domain_name) domain_user_ids = d.get_user() account = d.get_account() + domain_info = d.get_domain_info(domain_name) return render_template('domain_setting.html', domain=domain, users=users, domain_user_ids=domain_user_ids, accounts=accounts, - domain_account=account) + domain_account=account, + zone_type=domain_info["kind"].lower(), + masters=','.join(domain_info["masters"]), + soa_edit_api=domain_info["soa_edit_api"].upper()) if request.method == 'POST': # username in right column @@ -542,7 +519,7 @@ def setting(domain_name): d.grant_privileges(new_user_ids) history = History( - msg='Change domain {0} access control'.format( + msg='Change zone {0} access control'.format( pretty_domain_name(domain_name)), detail=json.dumps({'user_has_access': new_user_list}), created_by=current_user.username, @@ -580,7 +557,7 @@ def change_type(domain_name): kind=domain_type, masters=domain_master_ips) if status['status'] == 'ok': - history = History(msg='Update type for domain {0}'.format( + history = History(msg='Update type for zone {0}'.format( pretty_domain_name(domain_name)), detail=json.dumps({ "domain": domain_name, @@ -614,7 +591,7 
@@ def change_soa_edit_api(domain_name): soa_edit_api=new_setting) if status['status'] == 'ok': history = History( - msg='Update soa_edit_api for domain {0}'.format( + msg='Update soa_edit_api for zone {0}'.format( pretty_domain_name(domain_name)), detail = json.dumps({ 'domain': domain_name, @@ -658,7 +635,7 @@ def record_apply(domain_name): domain = Domain.query.filter(Domain.name == domain_name).first() if domain: - current_app.logger.debug('Current domain serial: {0}'.format( + current_app.logger.debug('Current zone serial: {0}'.format( domain.serial)) if int(submitted_serial) != domain.serial: @@ -675,18 +652,18 @@ def record_apply(domain_name): 'status': 'error', 'msg': - 'Domain name {0} does not exist'.format(pretty_domain_name(domain_name)) + 'Zone name {0} does not exist'.format(pretty_domain_name(domain_name)) }), 404) r = Record() result = r.apply(domain_name, submitted_record) if result['status'] == 'ok': history = History( - msg='Apply record changes to domain {0}'.format(pretty_domain_name(domain_name)), + msg='Apply record changes to zone {0}'.format(pretty_domain_name(domain_name)), detail = json.dumps({ 'domain': domain_name, - 'add_rrests': result['data'][0]['rrsets'], - 'del_rrests': result['data'][1]['rrsets'] + 'add_rrsets': result['data'][0]['rrsets'], + 'del_rrsets': result['data'][1]['rrsets'] }), created_by=current_user.username, domain_id=domain.id) @@ -694,7 +671,7 @@ def record_apply(domain_name): return make_response(jsonify(result), 200) else: history = History( - msg='Failed to apply record changes to domain {0}'.format( + msg='Failed to apply record changes to zone {0}'.format( pretty_domain_name(domain_name)), detail = json.dumps({ 'domain': domain_name, @@ -721,7 +698,7 @@ def record_apply(domain_name): @can_access_domain def record_update(domain_name): """ - This route is used for domain work as Slave Zone only + This route is used for zone work as Slave Zone only Pulling the records update from its Master """ try: @@ -779,7 
+756,7 @@ def dnssec_enable(domain_name): dnssec = domain.enable_domain_dnssec(domain_name) domain_object = Domain.query.filter(domain_name == Domain.name).first() history = History( - msg='DNSSEC was enabled for domain ' + domain_name , + msg='DNSSEC was enabled for zone ' + domain_name , created_by=current_user.username, domain_id=domain_object.id) history.add() @@ -798,7 +775,7 @@ def dnssec_disable(domain_name): domain.delete_dnssec_key(domain_name, key['id']) domain_object = Domain.query.filter(domain_name == Domain.name).first() history = History( - msg='DNSSEC was disabled for domain ' + domain_name , + msg='DNSSEC was disabled for zone ' + domain_name , created_by=current_user.username, domain_id=domain_object.id) history.add() @@ -875,7 +852,7 @@ def admin_setdomainsetting(domain_name): }), 400) except Exception as e: current_app.logger.error( - 'Cannot change domain setting. Error: {0}'.format(e)) + 'Cannot change zone setting. Error: {0}'.format(e)) current_app.logger.debug(traceback.format_exc()) return make_response( jsonify({ diff --git a/powerdnsadmin/routes/index.py b/powerdnsadmin/routes/index.py index f9a564929..23d88bbdd 100644 --- a/powerdnsadmin/routes/index.py +++ b/powerdnsadmin/routes/index.py @@ -5,12 +5,14 @@ import datetime import ipaddress import base64 +import string +from zxcvbn import zxcvbn from distutils.util import strtobool from yaml import Loader, load from flask import Blueprint, render_template, make_response, url_for, current_app, g, session, request, redirect, abort from flask_login import login_user, logout_user, login_required, current_user -from .base import login_manager +from .base import captcha, csrf, login_manager from ..lib import utils from ..decorators import dyndns_login_required from ..models.base import db @@ -43,6 +45,7 @@ template_folder='templates', url_prefix='/') + @index_bp.before_app_first_request def register_modules(): global google @@ -66,7 +69,7 @@ def before_request(): # Check site is in maintenance 
mode maintenance = Setting().get('maintenance') if maintenance and current_user.is_authenticated and current_user.role.name not in [ - 'Administrator', 'Operator' + 'Administrator', 'Operator' ]: return render_template('maintenance.html') @@ -96,7 +99,11 @@ def google_login(): ) abort(400) else: - redirect_uri = url_for('google_authorized', _external=True) + use_ssl = current_app.config.get('SERVER_EXTERNAL_SSL') + params = {'_external': True} + if isinstance(use_ssl, bool): + params['_scheme'] = 'https' if use_ssl else 'http' + redirect_uri = url_for('google_authorized', **params) return google.authorize_redirect(redirect_uri) @@ -108,7 +115,11 @@ def github_login(): ) abort(400) else: - redirect_uri = url_for('github_authorized', _external=True) + use_ssl = current_app.config.get('SERVER_EXTERNAL_SSL') + params = {'_external': True} + if isinstance(use_ssl, bool): + params['_scheme'] = 'https' if use_ssl else 'http' + redirect_uri = url_for('github_authorized', **params) return github.authorize_redirect(redirect_uri) @@ -120,9 +131,11 @@ def azure_login(): ) abort(400) else: - redirect_uri = url_for('azure_authorized', - _external=True, - _scheme='https') + use_ssl = current_app.config.get('SERVER_EXTERNAL_SSL') + params = {'_external': True} + if isinstance(use_ssl, bool): + params['_scheme'] = 'https' if use_ssl else 'http' + redirect_uri = url_for('azure_authorized', **params) return azure.authorize_redirect(redirect_uri) @@ -134,31 +147,35 @@ def oidc_login(): ) abort(400) else: - redirect_uri = url_for('oidc_authorized', _external=True) + use_ssl = current_app.config.get('SERVER_EXTERNAL_SSL') + params = {'_external': True} + if isinstance(use_ssl, bool): + params['_scheme'] = 'https' if use_ssl else 'http' + redirect_uri = url_for('oidc_authorized', **params) return oidc.authorize_redirect(redirect_uri) @index_bp.route('/login', methods=['GET', 'POST']) def login(): - SAML_ENABLED = current_app.config.get('SAML_ENABLED') + SAML_ENABLED = 
current_app.config.get('SAML_ENABLED', False) if g.user is not None and current_user.is_authenticated: return redirect(url_for('dashboard.dashboard')) if 'google_token' in session: user_data = json.loads(google.get('userinfo').text) - first_name = user_data['given_name'] - surname = user_data['family_name'] - email = user_data['email'] - user = User.query.filter_by(username=email).first() + google_first_name = user_data['given_name'] + google_last_name = user_data['family_name'] + google_email = user_data['email'] + user = User.query.filter_by(username=google_email).first() if user is None: - user = User.query.filter_by(email=email).first() + user = User.query.filter_by(email=google_email).first() if not user: - user = User(username=email, - firstname=first_name, - lastname=surname, + user = User(username=google_email, + firstname=google_first_name, + lastname=google_last_name, plain_text_password=None, - email=email) + email=google_email) result = user.create_local_user() if not result['status']: @@ -170,10 +187,18 @@ def login(): return authenticate_user(user, 'Google OAuth') if 'github_token' in session: - me = json.loads(github.get('user').text) - github_username = me['login'] - github_name = me['name'] - github_email = me['email'] + user_data = json.loads(github.get('user').text) + github_username = user_data['login'] + github_first_name = user_data['name'] + github_last_name = '' + github_email = user_data['email'] + + # If the user's full name from GitHub contains at least two words, use the first word as the first name and + # the rest as the last name. 
+ github_name_parts = github_first_name.split(' ') + if len(github_name_parts) > 1: + github_first_name = github_name_parts[0] + github_last_name = ' '.join(github_name_parts[1:]) user = User.query.filter_by(username=github_username).first() if user is None: @@ -181,8 +206,8 @@ def login(): if not user: user = User(username=github_username, plain_text_password=None, - firstname=github_name, - lastname='', + firstname=github_first_name, + lastname=github_last_name, email=github_email) result = user.create_local_user() @@ -196,8 +221,8 @@ def login(): if 'azure_token' in session: azure_info = azure.get('me?$select=displayName,givenName,id,mail,surname,userPrincipalName').text - current_app.logger.info('Azure login returned: '+azure_info) - me = json.loads(azure_info) + current_app.logger.info('Azure login returned: ' + azure_info) + user_data = json.loads(azure_info) azure_info = azure.post('me/getMemberGroups', json={'securityEnabledOnly': False}).text @@ -209,15 +234,15 @@ def login(): else: mygroups = [] - azure_username = me["userPrincipalName"] - azure_givenname = me["givenName"] - azure_familyname = me["surname"] - if "mail" in me: - azure_email = me["mail"] + azure_username = user_data["userPrincipalName"] + azure_first_name = user_data["givenName"] + azure_last_name = user_data["surname"] + if "mail" in user_data: + azure_email = user_data["mail"] else: azure_email = "" if not azure_email: - azure_email = me["userPrincipalName"] + azure_email = user_data["userPrincipalName"] # Handle foreign principals such as guest users azure_email = re.sub(r"#.*$", "", azure_email) @@ -227,13 +252,13 @@ def login(): if not user: user = User(username=azure_username, plain_text_password=None, - firstname=azure_givenname, - lastname=azure_familyname, + firstname=azure_first_name, + lastname=azure_last_name, email=azure_email) result = user.create_local_user() if not result['status']: - current_app.logger.warning('Unable to create ' + azure_username) + 
current_app.logger.warning('Unable to create ' + azure_username + ' Reasoning: ' + result['msg']) session.pop('azure_token', None) # note: a redirect to login results in an endless loop, so render the login page instead return render_template('login.html', @@ -248,30 +273,30 @@ def login(): if Setting().get('azure_sg_enabled'): if Setting().get('azure_admin_group') in mygroups: current_app.logger.info('Setting role for user ' + - azure_username + - ' to Administrator due to group membership') + azure_username + + ' to Administrator due to group membership') user.set_role("Administrator") else: if Setting().get('azure_operator_group') in mygroups: current_app.logger.info('Setting role for user ' + - azure_username + - ' to Operator due to group membership') + azure_username + + ' to Operator due to group membership') user.set_role("Operator") else: if Setting().get('azure_user_group') in mygroups: current_app.logger.info('Setting role for user ' + - azure_username + - ' to User due to group membership') + azure_username + + ' to User due to group membership') user.set_role("User") else: current_app.logger.warning('User ' + - azure_username + - ' has no relevant group memberships') + azure_username + + ' has no relevant group memberships') session.pop('azure_token', None) return render_template('login.html', - saml_enabled=SAML_ENABLED, - error=('User ' + azure_username + - ' is not in any authorised groups.')) + saml_enabled=SAML_ENABLED, + error=('User ' + azure_username + + ' is not in any authorised groups.')) # Handle account/group creation, if enabled if Setting().get('azure_group_accounts_enabled') and mygroups: @@ -324,14 +349,15 @@ def login(): continue account = Account() - account_id = account.get_id_by_name(account_name=group_name) + sanitized_group_name = Account.sanitize_name(group_name) + account_id = account.get_id_by_name(account_name=sanitized_group_name) if account_id: account = Account.query.get(account_id) # check if user has permissions 
account_users = account.get_user() current_app.logger.info('Group: {} Users: {}'.format( - group_name, + group_name, account_users)) if user.id in account_users: current_app.logger.info('User id {} is already in account {}'.format( @@ -345,13 +371,15 @@ def login(): current_app.logger.info('User {} added to Account {}'.format( user.username, account.name)) else: - account.name = group_name - account.description = group_description - account.contact = '' - account.mail = '' + account = Account( + name=sanitized_group_name, + description=group_description, + contact='', + mail='' + ) account.create_account() history = History(msg='Create account {0}'.format( - account.name), + account.name), created_by='System') history.add() @@ -364,23 +392,23 @@ def login(): return authenticate_user(user, 'Azure OAuth') if 'oidc_token' in session: - me = json.loads(oidc.get('userinfo').text) - oidc_username = me[Setting().get('oidc_oauth_username')] - oidc_givenname = me[Setting().get('oidc_oauth_firstname')] - oidc_familyname = me[Setting().get('oidc_oauth_last_name')] - oidc_email = me[Setting().get('oidc_oauth_email')] + user_data = json.loads(oidc.get('userinfo').text) + oidc_username = user_data[Setting().get('oidc_oauth_username')] + oidc_first_name = user_data[Setting().get('oidc_oauth_firstname')] + oidc_last_name = user_data[Setting().get('oidc_oauth_last_name')] + oidc_email = user_data[Setting().get('oidc_oauth_email')] user = User.query.filter_by(username=oidc_username).first() if not user: user = User(username=oidc_username, plain_text_password=None, - firstname=oidc_givenname, - lastname=oidc_familyname, + firstname=oidc_first_name, + lastname=oidc_last_name, email=oidc_email) result = user.create_local_user() else: - user.firstname = oidc_givenname - user.lastname = oidc_familyname + user.firstname = oidc_first_name + user.lastname = oidc_last_name user.email = oidc_email user.plain_text_password = None result = user.update_local_user() @@ -389,20 +417,22 @@ def 
login(): session.pop('oidc_token', None) return redirect(url_for('index.login')) - #This checks if the account_name_property and account_description property were included in settings. - if Setting().get('oidc_oauth_account_name_property') and Setting().get('oidc_oauth_account_description_property'): + # This checks if the account_name_property and account_description property were included in settings. + if Setting().get('oidc_oauth_account_name_property') and Setting().get( + 'oidc_oauth_account_description_property'): - #Gets the name_property and description_property. + # Gets the name_property and description_property. name_prop = Setting().get('oidc_oauth_account_name_property') desc_prop = Setting().get('oidc_oauth_account_description_property') account_to_add = [] - #If the name_property and desc_property exist in me (A variable that contains all the userinfo from the IdP). - if name_prop in me and desc_prop in me: - accounts_name_prop = [me[name_prop]] if type(me[name_prop]) is not list else me[name_prop] - accounts_desc_prop = [me[desc_prop]] if type(me[desc_prop]) is not list else me[desc_prop] - - #Run on all groups the user is in by the index num. + # If the name_property and desc_property exist in me (A variable that contains all the userinfo from the + # IdP). + if name_prop in user_data and desc_prop in user_data: + accounts_name_prop = [user_data[name_prop]] if type(user_data[name_prop]) is not list else user_data[name_prop] + accounts_desc_prop = [user_data[desc_prop]] if type(user_data[desc_prop]) is not list else user_data[desc_prop] + + # Run on all groups the user is in by the index num. 
for i in range(len(accounts_name_prop)): description = '' if i < len(accounts_desc_prop): @@ -411,8 +441,8 @@ def login(): account_to_add.append(account) user_accounts = user.get_accounts() - - # Add accounts + + # Add accounts for account in account_to_add: if account not in user_accounts: account.add_user(user) @@ -421,7 +451,7 @@ def login(): if Setting().get('delete_sso_accounts'): for account in user_accounts: if account not in account_to_add: - account.remove_user(user) + account.remove_user(user) session['user_id'] = user.id session['authentication_type'] = 'OAuth' @@ -485,34 +515,36 @@ def login(): saml_enabled=SAML_ENABLED, error='Token required') - if Setting().get('autoprovisioning') and auth_method!='LOCAL': - urn_value=Setting().get('urn_value') - Entitlements=user.read_entitlements(Setting().get('autoprovisioning_attribute')) - if len(Entitlements)==0 and Setting().get('purge'): + if Setting().get('autoprovisioning') and auth_method != 'LOCAL': + urn_value = Setting().get('urn_value') + Entitlements = user.read_entitlements(Setting().get('autoprovisioning_attribute')) + if len(Entitlements) == 0 and Setting().get('purge'): user.set_role("User") user.revoke_privilege(True) - - elif len(Entitlements)!=0: + + elif len(Entitlements) != 0: if checkForPDAEntries(Entitlements, urn_value): user.updateUser(Entitlements) else: - current_app.logger.warning('Not a single powerdns-admin record was found, possibly a typo in the prefix') + current_app.logger.warning( + 'Not a single powerdns-admin record was found, possibly a typo in the prefix') if Setting().get('purge'): user.set_role("User") user.revoke_privilege(True) - current_app.logger.warning('Procceding to revoke every privilige from ' + user.username + '.' 
) + current_app.logger.warning('Procceding to revoke every privilige from ' + user.username + '.') return authenticate_user(user, auth_method, remember_me) + def checkForPDAEntries(Entitlements, urn_value): """ Run through every record located in the ldap attribute given and determine if there are any valid powerdns-admin records """ - urnArguments=[x.lower() for x in urn_value.split(':')] + urnArguments = [x.lower() for x in urn_value.split(':')] for Entitlement in Entitlements: - entArguments=Entitlement.split(':powerdns-admin') - entArguments=[x.lower() for x in entArguments[0].split(':')] - if (entArguments==urnArguments): + entArguments = Entitlement.split(':powerdns-admin') + entArguments = [x.lower() for x in entArguments[0].split(':')] + if (entArguments == urnArguments): return True return False @@ -521,9 +553,10 @@ def clear_session(): session.pop('user_id', None) session.pop('github_token', None) session.pop('google_token', None) + session.pop('azure_token', None) + session.pop('oidc_token', None) session.pop('authentication_type', None) session.pop('remote_user', None) - session.clear() logout_user() @@ -549,14 +582,15 @@ def signin_history(username, authenticator, success): # Write history History(msg='User {} authentication {}'.format(username, str_success), - detail = json.dumps({ - 'username': username, - 'authenticator': authenticator, - 'ip_address': request_ip, - 'success': 1 if success else 0 - }), + detail=json.dumps({ + 'username': username, + 'authenticator': authenticator, + 'ip_address': request_ip, + 'success': 1 if success else 0 + }), created_by='System').add() + # Get a list of Azure security groups the user is a member of def get_azure_groups(uri): azure_info = azure.get(uri).text @@ -572,30 +606,33 @@ def get_azure_groups(uri): mygroups = [] return mygroups + # Handle user login, write history and, if set, handle showing the register_otp QR code. 
# if Setting for OTP on first login is enabled, and OTP field is also enabled, # but user isn't using it yet, enable OTP, get QR code and display it, logging the user out. def authenticate_user(user, authenticator, remember=False): login_user(user, remember=remember) signin_history(user.username, authenticator, True) - if Setting().get('otp_force') and Setting().get('otp_field_enabled') and not user.otp_secret: + if Setting().get('otp_force') and Setting().get('otp_field_enabled') and not user.otp_secret \ + and session['authentication_type'] not in ['OAuth']: user.update_profile(enable_otp=True) user_id = current_user.id prepare_welcome_user(user_id) return redirect(url_for('index.welcome')) return redirect(url_for('index.login')) + # Prepare user to enter /welcome screen, otherwise they won't have permission to do so def prepare_welcome_user(user_id): logout_user() session['welcome_user_id'] = user_id + @index_bp.route('/logout') def logout(): if current_app.config.get( 'SAML_ENABLED' - ) and 'samlSessionIndex' in session and current_app.config.get( - 'SAML_LOGOUT'): + ) and 'samlSessionIndex' in session and current_app.config.get('SAML_LOGOUT'): req = saml.prepare_flask_request(request) auth = saml.init_saml_auth(req) if current_app.config.get('SAML_LOGOUT_URL'): @@ -646,11 +683,102 @@ def logout(): return redirect(redirect_uri) +def password_policy_check(user, password): + def check_policy(chars, user_password, setting): + setting_as_int = int(Setting().get(setting)) + test_string = user_password + for c in chars: + test_string = test_string.replace(c, '') + return (setting_as_int, len(user_password) - len(test_string)) + + def matches_policy(item, policy_fails): + return "*" if item in policy_fails else "" + + policy = [] + policy_fails = {} + + # If either policy is enabled check basics first ... this is obvious! 
+ if Setting().get('pwd_enforce_characters') or Setting().get('pwd_enforce_complexity'): + # Cannot contain username + if user.username in password: + policy_fails["username"] = True + policy.append(f"{matches_policy('username', policy_fails)}cannot contain username") + + # Cannot contain password + if user.firstname in password: + policy_fails["firstname"] = True + policy.append(f"{matches_policy('firstname', policy_fails)}cannot contain firstname") + + # Cannot contain lastname + if user.lastname in password: + policy_fails["lastname"] = True + policy.append(f"{matches_policy('lastname', policy_fails)}cannot contain lastname") + + # Cannot contain email + if user.email in password: + policy_fails["email"] = True + policy.append(f"{matches_policy('email', policy_fails)}cannot contain email") + + # Check if we're enforcing character requirements + if Setting().get('pwd_enforce_characters'): + # Length + pwd_min_len_setting = int(Setting().get('pwd_min_len')) + pwd_len = len(password) + if pwd_len < pwd_min_len_setting: + policy_fails["length"] = True + policy.append(f"{matches_policy('length', policy_fails)}length={pwd_len}/{pwd_min_len_setting}") + # Digits + (pwd_min_digits_setting, pwd_digits) = check_policy(string.digits, password, 'pwd_min_digits') + if pwd_digits < pwd_min_digits_setting: + policy_fails["digits"] = True + policy.append(f"{matches_policy('digits', policy_fails)}digits={pwd_digits}/{pwd_min_digits_setting}") + # Lowercase + (pwd_min_lowercase_setting, pwd_lowercase) = check_policy(string.digits, password, 'pwd_min_lowercase') + if pwd_lowercase < pwd_min_lowercase_setting: + policy_fails["lowercase"] = True + policy.append( + f"{matches_policy('lowercase', policy_fails)}lowercase={pwd_lowercase}/{pwd_min_lowercase_setting}") + # Uppercase + (pwd_min_uppercase_setting, pwd_uppercase) = check_policy(string.digits, password, 'pwd_min_uppercase') + if pwd_uppercase < pwd_min_uppercase_setting: + policy_fails["uppercase"] = True + policy.append( + 
f"{matches_policy('uppercase', policy_fails)}uppercase={pwd_uppercase}/{pwd_min_uppercase_setting}") + # Special + (pwd_min_special_setting, pwd_special) = check_policy(string.digits, password, 'pwd_min_special') + if pwd_special < pwd_min_special_setting: + policy_fails["special"] = True + policy.append(f"{matches_policy('special', policy_fails)}special={pwd_special}/{pwd_min_special_setting}") + + if Setting().get('pwd_enforce_complexity'): + # Complexity checking + zxcvbn_inputs = [] + for input in (user.firstname, user.lastname, user.username, user.email): + if len(input): + zxcvbn_inputs.append(input) + + result = zxcvbn(password, user_inputs=zxcvbn_inputs) + pwd_min_complexity_setting = int(Setting().get('pwd_min_complexity')) + pwd_complexity = result['guesses_log10'] + if pwd_complexity < pwd_min_complexity_setting: + policy_fails["complexity"] = True + policy.append( + f"{matches_policy('complexity', policy_fails)}complexity={pwd_complexity:.0f}/{pwd_min_complexity_setting}") + + policy_str = {"password": f"Fails policy: {', '.join(policy)}. 
Items prefixed with '*' failed."} + + # NK: the first item in the tuple indicates a PASS, so, we check for any True's and negate that + return (not any(policy_fails.values()), policy_str) + + @index_bp.route('/register', methods=['GET', 'POST']) def register(): + CAPTCHA_ENABLE = current_app.config.get('CAPTCHA_ENABLE') if Setting().get('signup_enabled'): + if current_user.is_authenticated: + return redirect(url_for('index.index')) if request.method == 'GET': - return render_template('register.html') + return render_template('register.html', captcha_enable=CAPTCHA_ENABLE) elif request.method == 'POST': username = request.form.get('username', '').strip() password = request.form.get('password', '') @@ -659,20 +787,45 @@ def register(): email = request.form.get('email', '').strip() rpassword = request.form.get('rpassword', '') - if not username or not password or not email: - return render_template( - 'register.html', error='Please input required information') - + is_valid_email = re.compile(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$') + + error_messages = {} + if not firstname: + error_messages['firstname'] = 'First Name is required' + if not lastname: + error_messages['lastname'] = 'Last Name is required' + if not username: + error_messages['username'] = 'Username is required' + if not password: + error_messages['password'] = 'Password is required' + if not rpassword: + error_messages['rpassword'] = 'Password confirmation is required' + if not email: + error_messages['email'] = 'Email is required' + if not is_valid_email.match(email): + error_messages['email'] = 'Invalid email address' if password != rpassword: + error_messages['password'] = 'Password confirmation does not match' + error_messages['rpassword'] = 'Password confirmation does not match' + + if not captcha.validate(): return render_template( - 'register.html', - error="Password confirmation does not match") + 'register.html', error='Invalid CAPTCHA answer', error_messages=error_messages, + 
captcha_enable=CAPTCHA_ENABLE) + + if error_messages: + return render_template('register.html', error_messages=error_messages, captcha_enable=CAPTCHA_ENABLE) user = User(username=username, plain_text_password=password, firstname=firstname, lastname=lastname, - email=email) + email=email + ) + + (password_policy_pass, password_policy) = password_policy_check(user, password) + if not password_policy_pass: + return render_template('register.html', error_messages=password_policy, captcha_enable=CAPTCHA_ENABLE) try: result = user.create_local_user() @@ -687,11 +840,11 @@ def register(): return redirect(url_for('index.login')) else: return render_template('register.html', - error=result['msg']) + error=result['msg'], captcha_enable=CAPTCHA_ENABLE) except Exception as e: - return render_template('register.html', error=e) - else: - return render_template('errors/404.html'), 404 + return render_template('register.html', error=e, captcha_enable=CAPTCHA_ENABLE) + else: + return render_template('errors/404.html'), 404 # Show welcome page on first login if otp_force is enabled @@ -710,12 +863,15 @@ def welcome(): if otp_token and otp_token.isdigit(): good_token = user.verify_totp(otp_token) if not good_token: - return render_template('register_otp.html', qrcode_image=encoded_img_data.decode(), user=user, error="Invalid token") + return render_template('register_otp.html', qrcode_image=encoded_img_data.decode(), user=user, + error="Invalid token") else: - return render_template('register_otp.html', qrcode_image=encoded_img_data.decode(), user=user, error="Token required") + return render_template('register_otp.html', qrcode_image=encoded_img_data.decode(), user=user, + error="Token required") session.pop('welcome_user_id') return redirect(url_for('index.index')) + @index_bp.route('/confirm/', methods=['GET']) def confirm_email(token): email = confirm_token(token) @@ -761,6 +917,7 @@ def resend_confirmation_email(): @index_bp.route('/nic/checkip.html', methods=['GET', 'POST']) 
+@csrf.exempt def dyndns_checkip(): # This route covers the default ddclient 'web' setting for the checkip service return render_template('dyndns.html', @@ -769,6 +926,7 @@ def dyndns_checkip(): @index_bp.route('/nic/update', methods=['GET', 'POST']) +@csrf.exempt @dyndns_login_required def dyndns_update(): # dyndns protocol response codes in use are: @@ -800,10 +958,10 @@ def dyndns_update(): .outerjoin(Account, Domain.account_id == Account.id) \ .outerjoin(AccountUser, Account.id == AccountUser.account_id) \ .filter( - db.or_( - DomainUser.user_id == current_user.id, - AccountUser.user_id == current_user.id - )).all() + db.or_( + DomainUser.user_id == current_user.id, + AccountUser.user_id == current_user.id + )).all() except Exception as e: current_app.logger.error('DynDNS Error: {0}'.format(e)) current_app.logger.debug(traceback.format_exc()) @@ -836,7 +994,7 @@ def dyndns_update(): remote_addr = utils.validate_ipaddress( request.headers.get('X-Forwarded-For', - request.remote_addr).split(', ')[:1]) + request.remote_addr).split(', ')[0]) response = 'nochg' for ip in myip_addr or remote_addr: @@ -863,13 +1021,13 @@ def dyndns_update(): if result['status'] == 'ok': history = History( msg='DynDNS update: updated {} successfully'.format(hostname), - detail = json.dumps({ - 'domain': domain.name, - 'record': hostname, - 'type': rtype, - 'old_value': oldip, - 'new_value': str(ip) - }), + detail=json.dumps({ + 'domain': domain.name, + 'record': hostname, + 'type': rtype, + 'old_value': oldip, + 'new_value': str(ip) + }), created_by=current_user.username, domain_id=domain.id) history.add() @@ -880,7 +1038,7 @@ def dyndns_update(): elif r.is_allowed_edit(): ondemand_creation = DomainSetting.query.filter( DomainSetting.domain == domain).filter( - DomainSetting.setting == 'create_via_dyndns').first() + DomainSetting.setting == 'create_via_dyndns').first() if (ondemand_creation is not None) and (strtobool( ondemand_creation.value) == True): @@ -905,11 +1063,11 @@ def 
dyndns_update(): msg= 'DynDNS update: created record {0} in zone {1} successfully' .format(hostname, domain.name, str(ip)), - detail = json.dumps({ - 'domain': domain.name, - 'record': hostname, - 'value': str(ip) - }), + detail=json.dumps({ + 'domain': domain.name, + 'record': hostname, + 'value': str(ip) + }), created_by=current_user.username, domain_id=domain.id) history.add() @@ -928,7 +1086,7 @@ def dyndns_update(): ### START SAML AUTHENTICATION ### @index_bp.route('/saml/login') def saml_login(): - if not current_app.config.get('SAML_ENABLED'): + if not current_app.config.get('SAML_ENABLED', False): abort(400) from onelogin.saml2.utils import OneLogin_Saml2_Utils req = saml.prepare_flask_request(request) @@ -940,7 +1098,7 @@ def saml_login(): @index_bp.route('/saml/metadata') def saml_metadata(): - if not current_app.config.get('SAML_ENABLED'): + if not current_app.config.get('SAML_ENABLED', False): current_app.logger.error("SAML authentication is disabled.") abort(400) from onelogin.saml2.utils import OneLogin_Saml2_Utils @@ -959,16 +1117,17 @@ def saml_metadata(): @index_bp.route('/saml/authorized', methods=['GET', 'POST']) +@csrf.exempt def saml_authorized(): errors = [] - if not current_app.config.get('SAML_ENABLED'): + if not current_app.config.get('SAML_ENABLED', False): current_app.logger.error("SAML authentication is disabled.") abort(400) from onelogin.saml2.utils import OneLogin_Saml2_Utils req = saml.prepare_flask_request(request) auth = saml.init_saml_auth(req) auth.process_response() - current_app.logger.debug( auth.get_attributes() ) + current_app.logger.debug(auth.get_attributes()) errors = auth.get_errors() if len(errors) == 0: session['samlUserdata'] = auth.get_attributes() @@ -977,7 +1136,7 @@ def saml_authorized(): self_url = OneLogin_Saml2_Utils.get_self_url(req) self_url = self_url + req['script_name'] if 'RelayState' in request.form and self_url != request.form[ - 'RelayState']: + 'RelayState']: return 
redirect(auth.redirect_to(request.form['RelayState'])) if current_app.config.get('SAML_ATTRIBUTE_USERNAME', False): username = session['samlUserdata'][ @@ -1008,6 +1167,8 @@ def saml_authorized(): None) admin_group_name = current_app.config.get('SAML_GROUP_ADMIN_NAME', None) + operator_group_name = current_app.config.get('SAML_GROUP_OPERATOR_NAME', + None) group_to_account_mapping = create_group_to_account_mapping() if email_attribute_name in session['samlUserdata']: @@ -1048,25 +1209,27 @@ def saml_authorized(): account.add_user(user) history = History(msg='Adding {0} to account {1}'.format( user.username, account.name), - created_by='SAML Assertion') + created_by='SAML Assertion') history.add() for account in user_accounts - saml_accounts: account.remove_user(user) history = History(msg='Removing {0} from account {1}'.format( user.username, account.name), - created_by='SAML Assertion') + created_by='SAML Assertion') history.add() if admin_attribute_name and 'true' in session['samlUserdata'].get( admin_attribute_name, []): uplift_to_admin(user) elif admin_group_name in user_groups: uplift_to_admin(user) + elif operator_group_name in user_groups: + uplift_to_operator(user) elif admin_attribute_name or group_attribute_name: if user.role.name != 'User': user.role_id = Role.query.filter_by(name='User').first().id history = History(msg='Demoting {0} to user'.format( user.username), - created_by='SAML Assertion') + created_by='SAML Assertion') history.add() user.plain_text_password = None user.update_profile() @@ -1088,14 +1251,10 @@ def create_group_to_account_mapping(): def handle_account(account_name, account_description=""): - clean_name = ''.join(c for c in account_name.lower() - if c in "abcdefghijklmnopqrstuvwxyz0123456789") - if len(clean_name) > Account.name.type.length: - current_app.logger.error( - "Account name {0} too long. 
Truncated.".format(clean_name)) + clean_name = Account.sanitize_name(account_name) account = Account.query.filter_by(name=clean_name).first() if not account: - account = Account(name=clean_name.lower(), + account = Account(name=clean_name, description=account_description, contact='', mail='') @@ -1114,7 +1273,16 @@ def uplift_to_admin(user): user.role_id = Role.query.filter_by(name='Administrator').first().id history = History(msg='Promoting {0} to administrator'.format( user.username), - created_by='SAML Assertion') + created_by='SAML Assertion') + history.add() + + +def uplift_to_operator(user): + if user.role.name != 'Operator': + user.role_id = Role.query.filter_by(name='Operator').first().id + history = History(msg='Promoting {0} to operator'.format( + user.username), + created_by='SAML Assertion') history.add() diff --git a/powerdnsadmin/routes/user.py b/powerdnsadmin/routes/user.py index f411c29a9..469b45942 100644 --- a/powerdnsadmin/routes/user.py +++ b/powerdnsadmin/routes/user.py @@ -1,9 +1,16 @@ import datetime -from flask import Blueprint, request, render_template, make_response, jsonify, redirect, url_for, g, session, current_app +import hashlib +import imghdr +import mimetypes + +from flask import Blueprint, request, render_template, make_response, jsonify, redirect, url_for, g, session, \ + current_app, after_this_request, abort from flask_login import current_user, login_required, login_manager from ..models.user import User, Anonymous from ..models.setting import Setting +from .index import password_policy_check + user_bp = Blueprint('user', __name__, @@ -30,6 +37,11 @@ def before_request(): minutes=int(Setting().get('session_timeout'))) session.modified = True + # Clean up expired sessions in the database + if Setting().get('session_type') == 'sqlalchemy': + from ..models.sessions import Sessions + Sessions().clean_up_expired_sessions() + @user_bp.route('/profile', methods=['GET', 'POST']) @login_required @@ -74,12 +86,23 @@ def profile(): 
.format(current_user.username) }), 400) + (password_policy_pass, password_policy) = password_policy_check(current_user.get_user_info_by_username(), new_password) + if not password_policy_pass: + if request.data: + return make_response( + jsonify({ + 'status': 'error', + 'msg': password_policy['password'], + }), 400) + return render_template('user_profile.html', error_messages=password_policy) + user = User(username=current_user.username, plain_text_password=new_password, firstname=firstname, lastname=lastname, email=email, reload_info=False) + user.update_profile() return render_template('user_profile.html') @@ -96,4 +119,55 @@ def qrcode(): 'Cache-Control': 'no-cache, no-store, must-revalidate', 'Pragma': 'no-cache', 'Expires': '0' - } \ No newline at end of file + } + + +@user_bp.route('/image', methods=['GET']) +@login_required +def image(): + """Returns the user profile image or avatar.""" + + @after_this_request + def add_cache_headers(response_): + """When the response is ok, add cache headers.""" + if 200 <= response_.status_code <= 399: + response_.cache_control.private = True + response_.cache_control.max_age = int(datetime.timedelta(days=1).total_seconds()) + return response_ + + def return_image(content, content_type=None): + """Return the given binary image content. Guess the type if not given.""" + if not content_type: + guess = mimetypes.guess_type('example.' 
+ imghdr.what(None, h=content)) + if guess and guess[0]: + content_type = guess[0] + + return content, 200, {'Content-Type': content_type} + + # To prevent "cache poisoning", the username query parameter is required + if request.args.get('username', None) != current_user.username: + abort(400) + + setting = Setting() + + if session['authentication_type'] == 'LDAP': + search_filter = '(&({0}={1}){2})'.format(setting.get('ldap_filter_username'), + current_user.username, + setting.get('ldap_filter_basic')) + result = User().ldap_search(search_filter, setting.get('ldap_base_dn')) + if result and result[0] and result[0][0] and result[0][0][1]: + user_obj = result[0][0][1] + for key in ['jpegPhoto', 'thumbnailPhoto']: + if key in user_obj and user_obj[key] and user_obj[key][0]: + current_app.logger.debug(f'Return {key} from ldap as user image') + return return_image(user_obj[key][0]) + + email = current_user.email + if email and setting.get('gravatar_enabled'): + hash_ = hashlib.md5(email.encode('utf-8')).hexdigest() + url = f'https://s.gravatar.com/avatar/{hash_}?s=100' + current_app.logger.debug('Redirect user image request to gravatar') + return redirect(url, 307) + + # Fallback to the local default image + return current_app.send_static_file('img/user_image.png') diff --git a/powerdnsadmin/services/azure.py b/powerdnsadmin/services/azure.py index 46fb1af16..faf1ac3dd 100644 --- a/powerdnsadmin/services/azure.py +++ b/powerdnsadmin/services/azure.py @@ -15,28 +15,41 @@ def update_token(token): session['azure_token'] = token return token + authlib_params = { + 'client_id': Setting().get('azure_oauth_key'), + 'client_secret': Setting().get('azure_oauth_secret'), + 'api_base_url': Setting().get('azure_oauth_api_url'), + 'request_token_url': None, + 'client_kwargs': {'scope': Setting().get('azure_oauth_scope')}, + 'fetch_token': fetch_azure_token, + } + + auto_configure = Setting().get('azure_oauth_auto_configure') + server_metadata_url = 
Setting().get('azure_oauth_metadata_url') + + if auto_configure and isinstance(server_metadata_url, str) and len(server_metadata_url.strip()) > 0: + authlib_params['server_metadata_url'] = server_metadata_url + else: + authlib_params['access_token_url'] = Setting().get('azure_oauth_token_url') + authlib_params['authorize_url'] = Setting().get('azure_oauth_authorize_url') + azure = authlib_oauth_client.register( 'azure', - client_id=Setting().get('azure_oauth_key'), - client_secret=Setting().get('azure_oauth_secret'), - api_base_url=Setting().get('azure_oauth_api_url'), - request_token_url=None, - access_token_url=Setting().get('azure_oauth_token_url'), - authorize_url=Setting().get('azure_oauth_authorize_url'), - client_kwargs={'scope': Setting().get('azure_oauth_scope')}, - fetch_token=fetch_azure_token, + **authlib_params ) @current_app.route('/azure/authorized') def azure_authorized(): - session['azure_oauthredir'] = url_for('.azure_authorized', - _external=True, - _scheme='https') + use_ssl = current_app.config.get('SERVER_EXTERNAL_SSL') + params = {'_external': True} + if isinstance(use_ssl, bool): + params['_scheme'] = 'https' if use_ssl else 'http' + session['azure_oauthredir'] = url_for('.azure_authorized', **params) token = azure.authorize_access_token() if token is None: return 'Access denied: reason=%s error=%s' % ( request.args['error'], request.args['error_description']) session['azure_token'] = (token) - return redirect(url_for('index.login', _external=True, _scheme='https')) + return redirect(url_for('index.login', **params)) return azure diff --git a/powerdnsadmin/services/github.py b/powerdnsadmin/services/github.py index cf615e8df..42862e908 100644 --- a/powerdnsadmin/services/github.py +++ b/powerdnsadmin/services/github.py @@ -15,28 +15,43 @@ def update_token(token): session['github_token'] = token return token + authlib_params = { + 'client_id': Setting().get('github_oauth_key'), + 'client_secret': Setting().get('github_oauth_secret'), + 
'request_token_params': {'scope': Setting().get('github_oauth_scope')}, + 'api_base_url': Setting().get('github_oauth_api_url'), + 'request_token_url': None, + 'client_kwargs': {'scope': Setting().get('github_oauth_scope')}, + 'fetch_token': fetch_github_token, + 'update_token': update_token + } + + auto_configure = Setting().get('github_oauth_auto_configure') + server_metadata_url = Setting().get('github_oauth_metadata_url') + + if auto_configure and isinstance(server_metadata_url, str) and len(server_metadata_url.strip()) > 0: + authlib_params['server_metadata_url'] = server_metadata_url + else: + authlib_params['access_token_url'] = Setting().get('github_oauth_token_url') + authlib_params['authorize_url'] = Setting().get('github_oauth_authorize_url') + github = authlib_oauth_client.register( 'github', - client_id=Setting().get('github_oauth_key'), - client_secret=Setting().get('github_oauth_secret'), - request_token_params={'scope': Setting().get('github_oauth_scope')}, - api_base_url=Setting().get('github_oauth_api_url'), - request_token_url=None, - access_token_url=Setting().get('github_oauth_token_url'), - authorize_url=Setting().get('github_oauth_authorize_url'), - client_kwargs={'scope': Setting().get('github_oauth_scope')}, - fetch_token=fetch_github_token, - update_token=update_token) + **authlib_params + ) @current_app.route('/github/authorized') def github_authorized(): - session['github_oauthredir'] = url_for('.github_authorized', - _external=True) + use_ssl = current_app.config.get('SERVER_EXTERNAL_SSL') + params = {'_external': True} + if isinstance(use_ssl, bool): + params['_scheme'] = 'https' if use_ssl else 'http' + session['github_oauthredir'] = url_for('.github_authorized', **params) token = github.authorize_access_token() if token is None: return 'Access denied: reason=%s error=%s' % ( request.args['error'], request.args['error_description']) - session['github_token'] = (token) - return redirect(url_for('index.login')) + session['github_token'] 
= token + return redirect(url_for('index.login', **params)) return github diff --git a/powerdnsadmin/services/google.py b/powerdnsadmin/services/google.py index 68775a2c1..e3e6362e2 100644 --- a/powerdnsadmin/services/google.py +++ b/powerdnsadmin/services/google.py @@ -15,30 +15,44 @@ def update_token(token): session['google_token'] = token return token + authlib_params = { + 'client_id': Setting().get('google_oauth_client_id'), + 'client_secret': Setting().get('google_oauth_client_secret'), + 'api_base_url': Setting().get('google_base_url'), + 'request_token_url': None, + 'client_kwargs': {'scope': Setting().get('google_oauth_scope')}, + 'fetch_token': fetch_google_token, + 'update_token': update_token + } + + auto_configure = Setting().get('google_oauth_auto_configure') + server_metadata_url = Setting().get('google_oauth_metadata_url') + + if auto_configure and isinstance(server_metadata_url, str) and len(server_metadata_url.strip()) > 0: + authlib_params['server_metadata_url'] = server_metadata_url + else: + authlib_params['access_token_url'] = Setting().get('google_token_url') + authlib_params['authorize_url'] = Setting().get('google_authorize_url') + google = authlib_oauth_client.register( 'google', - client_id=Setting().get('google_oauth_client_id'), - client_secret=Setting().get('google_oauth_client_secret'), - api_base_url=Setting().get('google_base_url'), - request_token_url=None, - access_token_url=Setting().get('google_token_url'), - authorize_url=Setting().get('google_authorize_url'), - client_kwargs={'scope': Setting().get('google_oauth_scope')}, - fetch_token=fetch_google_token, - update_token=update_token) + **authlib_params + ) @current_app.route('/google/authorized') def google_authorized(): - session['google_oauthredir'] = url_for( - '.google_authorized', _external=True) + use_ssl = current_app.config.get('SERVER_EXTERNAL_SSL') + params = {'_external': True} + if isinstance(use_ssl, bool): + params['_scheme'] = 'https' if use_ssl else 'http' + 
session['google_oauthredir'] = url_for('.google_authorized', **params) token = google.authorize_access_token() if token is None: return 'Access denied: reason=%s error=%s' % ( request.args['error_reason'], request.args['error_description'] ) - session['google_token'] = (token) - return redirect(url_for('index.login')) + session['google_token'] = token + return redirect(url_for('index.login', **params)) return google - diff --git a/powerdnsadmin/services/oidc.py b/powerdnsadmin/services/oidc.py index 7e8172b6a..2e36070f2 100644 --- a/powerdnsadmin/services/oidc.py +++ b/powerdnsadmin/services/oidc.py @@ -15,27 +15,42 @@ def update_token(token): session['oidc_token'] = token return token + authlib_params = { + 'client_id': Setting().get('oidc_oauth_key'), + 'client_secret': Setting().get('oidc_oauth_secret'), + 'api_base_url': Setting().get('oidc_oauth_api_url'), + 'request_token_url': None, + 'client_kwargs': {'scope': Setting().get('oidc_oauth_scope')}, + 'fetch_token': fetch_oidc_token, + 'update_token': update_token + } + + auto_configure = Setting().get('oidc_oauth_auto_configure') + server_metadata_url = Setting().get('oidc_oauth_metadata_url') + + if auto_configure and isinstance(server_metadata_url, str) and len(server_metadata_url.strip()) > 0: + authlib_params['server_metadata_url'] = server_metadata_url + else: + authlib_params['access_token_url'] = Setting().get('oidc_oauth_token_url') + authlib_params['authorize_url'] = Setting().get('oidc_oauth_authorize_url') + oidc = authlib_oauth_client.register( 'oidc', - client_id=Setting().get('oidc_oauth_key'), - client_secret=Setting().get('oidc_oauth_secret'), - api_base_url=Setting().get('oidc_oauth_api_url'), - request_token_url=None, - access_token_url=Setting().get('oidc_oauth_token_url'), - authorize_url=Setting().get('oidc_oauth_authorize_url'), - client_kwargs={'scope': Setting().get('oidc_oauth_scope')}, - fetch_token=fetch_oidc_token, - update_token=update_token) + **authlib_params + ) 
@current_app.route('/oidc/authorized') def oidc_authorized(): - session['oidc_oauthredir'] = url_for('.oidc_authorized', - _external=True) + use_ssl = current_app.config.get('SERVER_EXTERNAL_SSL') + params = {'_external': True} + if isinstance(use_ssl, bool): + params['_scheme'] = 'https' if use_ssl else 'http' + session['oidc_oauthredir'] = url_for('.oidc_authorized', **params) token = oidc.authorize_access_token() if token is None: return 'Access denied: reason=%s error=%s' % ( request.args['error'], request.args['error_description']) - session['oidc_token'] = (token) - return redirect(url_for('index.login')) + session['oidc_token'] = token + return redirect(url_for('index.login', **params)) - return oidc \ No newline at end of file + return oidc diff --git a/powerdnsadmin/services/saml.py b/powerdnsadmin/services/saml.py index 40c97bf00..4a33ee635 100644 --- a/powerdnsadmin/services/saml.py +++ b/powerdnsadmin/services/saml.py @@ -72,8 +72,9 @@ def retrieve_idp_data(self): def prepare_flask_request(self, request): # If server is behind proxys or balancers use the HTTP_X_FORWARDED fields url_data = urlparse(request.url) + proto = request.headers.get('HTTP_X_FORWARDED_PROTO', request.scheme) return { - 'https': 'on' if request.scheme == 'https' else 'off', + 'https': 'on' if proto == 'https' else 'off', 'http_host': request.host, 'server_port': url_data.port, 'script_name': request.path, @@ -163,7 +164,8 @@ def init_saml_auth(self, req): 'signatureAlgorithm'] = 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256' settings['security']['wantAssertionsEncrypted'] = current_app.config.get( 'SAML_ASSERTION_ENCRYPTED', True) - settings['security']['wantAttributeStatement'] = True + settings['security']['wantAttributeStatement'] = current_app.config.get( + 'SAML_WANT_ATTRIBUTE_STATEMENT', True) settings['security']['wantNameId'] = True settings['security']['authnRequestsSigned'] = current_app.config[ 'SAML_SIGN_REQUEST'] diff --git 
a/powerdnsadmin/static/assets/css/roboto_mono.css b/powerdnsadmin/static/assets/css/roboto_mono.css index dc14ffbe9..ceb7256e9 100644 --- a/powerdnsadmin/static/assets/css/roboto_mono.css +++ b/powerdnsadmin/static/assets/css/roboto_mono.css @@ -4,8 +4,8 @@ font-style: normal; font-weight: 300; src: local('Roboto Mono Light'), local('RobotoMono-Light'), - url('/static/assets/fonts/roboto-mono-v7-latin-300.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ - url('/static/assets/fonts/roboto-mono-v7-latin-300.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ + url('../fonts/roboto-mono-v7-latin-300.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ + url('../fonts/roboto-mono-v7-latin-300.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ } /* roboto-mono-regular - latin */ @font-face { @@ -13,8 +13,8 @@ font-style: normal; font-weight: 400; src: local('Roboto Mono'), local('RobotoMono-Regular'), - url('/static/assets/fonts/roboto-mono-v7-latin-regular.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ - url('/static/assets/fonts/roboto-mono-v7-latin-regular.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ + url('../fonts/roboto-mono-v7-latin-regular.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ + url('../fonts/roboto-mono-v7-latin-regular.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ } /* roboto-mono-700 - latin */ @font-face { @@ -22,6 +22,6 @@ font-style: normal; font-weight: 700; src: local('Roboto Mono Bold'), local('RobotoMono-Bold'), - url('/static/assets/fonts/roboto-mono-v7-latin-700.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ - url('/static/assets/fonts/roboto-mono-v7-latin-700.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ + url('../fonts/roboto-mono-v7-latin-700.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ + 
url('../fonts/roboto-mono-v7-latin-700.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ } \ No newline at end of file diff --git a/powerdnsadmin/static/assets/css/source_sans_pro.css b/powerdnsadmin/static/assets/css/source_sans_pro.css index 06ef9f49b..8cd030a5f 100644 --- a/powerdnsadmin/static/assets/css/source_sans_pro.css +++ b/powerdnsadmin/static/assets/css/source_sans_pro.css @@ -3,89 +3,89 @@ font-family: 'Source Sans Pro'; font-style: normal; font-weight: 300; - src: url('/static/assets/fonts/source-sans-pro-v13-latin-300.eot'); /* IE9 Compat Modes */ + src: url('../fonts/source-sans-pro-v13-latin-300.eot'); /* IE9 Compat Modes */ src: local('Source Sans Pro Light'), local('SourceSansPro-Light'), - url('/static/assets/fonts/source-sans-pro-v13-latin-300.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('/static/assets/fonts/source-sans-pro-v13-latin-300.woff2') format('woff2'), /* Super Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-300.woff') format('woff'), /* Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-300.ttf') format('truetype'), /* Safari, Android, iOS */ - url('/static/assets/fonts/source-sans-pro-v13-latin-300.svg#SourceSansPro') format('svg'); /* Legacy iOS */ + url('../fonts/source-sans-pro-v13-latin-300.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('../fonts/source-sans-pro-v13-latin-300.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-300.woff') format('woff'), /* Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-300.ttf') format('truetype'), /* Safari, Android, iOS */ + url('../fonts/source-sans-pro-v13-latin-300.svg#SourceSansPro') format('svg'); /* Legacy iOS */ } /* source-sans-pro-300italic - latin */ @font-face { font-family: 'Source Sans Pro'; font-style: italic; font-weight: 300; - src: url('/static/assets/fonts/source-sans-pro-v13-latin-300italic.eot'); /* IE9 Compat Modes 
*/ + src: url('../fonts/source-sans-pro-v13-latin-300italic.eot'); /* IE9 Compat Modes */ src: local('Source Sans Pro Light Italic'), local('SourceSansPro-LightItalic'), - url('/static/assets/fonts/source-sans-pro-v13-latin-300italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('/static/assets/fonts/source-sans-pro-v13-latin-300italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-300italic.woff') format('woff'), /* Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-300italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('/static/assets/fonts/source-sans-pro-v13-latin-300italic.svg#SourceSansPro') format('svg'); /* Legacy iOS */ + url('../fonts/source-sans-pro-v13-latin-300italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('../fonts/source-sans-pro-v13-latin-300italic.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-300italic.woff') format('woff'), /* Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-300italic.ttf') format('truetype'), /* Safari, Android, iOS */ + url('../fonts/source-sans-pro-v13-latin-300italic.svg#SourceSansPro') format('svg'); /* Legacy iOS */ } /* source-sans-pro-regular - latin */ @font-face { font-family: 'Source Sans Pro'; font-style: normal; font-weight: 400; - src: url('/static/assets/fonts/source-sans-pro-v13-latin-regular.eot'); /* IE9 Compat Modes */ + src: url('../fonts/source-sans-pro-v13-latin-regular.eot'); /* IE9 Compat Modes */ src: local('Source Sans Pro Regular'), local('SourceSansPro-Regular'), - url('/static/assets/fonts/source-sans-pro-v13-latin-regular.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('/static/assets/fonts/source-sans-pro-v13-latin-regular.woff2') format('woff2'), /* Super Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-regular.woff') format('woff'), /* Modern Browsers */ - 
url('/static/assets/fonts/source-sans-pro-v13-latin-regular.ttf') format('truetype'), /* Safari, Android, iOS */ - url('/static/assets/fonts/source-sans-pro-v13-latin-regular.svg#SourceSansPro') format('svg'); /* Legacy iOS */ + url('../fonts/source-sans-pro-v13-latin-regular.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('../fonts/source-sans-pro-v13-latin-regular.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-regular.woff') format('woff'), /* Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-regular.ttf') format('truetype'), /* Safari, Android, iOS */ + url('../fonts/source-sans-pro-v13-latin-regular.svg#SourceSansPro') format('svg'); /* Legacy iOS */ } /* source-sans-pro-italic - latin */ @font-face { font-family: 'Source Sans Pro'; font-style: italic; font-weight: 400; - src: url('/static/assets/fonts/source-sans-pro-v13-latin-italic.eot'); /* IE9 Compat Modes */ + src: url('../fonts/source-sans-pro-v13-latin-italic.eot'); /* IE9 Compat Modes */ src: local('Source Sans Pro Italic'), local('SourceSansPro-Italic'), - url('/static/assets/fonts/source-sans-pro-v13-latin-italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('/static/assets/fonts/source-sans-pro-v13-latin-italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-italic.woff') format('woff'), /* Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('/static/assets/fonts/source-sans-pro-v13-latin-italic.svg#SourceSansPro') format('svg'); /* Legacy iOS */ + url('../fonts/source-sans-pro-v13-latin-italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('../fonts/source-sans-pro-v13-latin-italic.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-italic.woff') format('woff'), /* Modern Browsers */ + 
url('../fonts/source-sans-pro-v13-latin-italic.ttf') format('truetype'), /* Safari, Android, iOS */ + url('../fonts/source-sans-pro-v13-latin-italic.svg#SourceSansPro') format('svg'); /* Legacy iOS */ } /* source-sans-pro-600 - latin */ @font-face { font-family: 'Source Sans Pro'; font-style: normal; font-weight: 600; - src: url('/static/assets/fonts/source-sans-pro-v13-latin-600.eot'); /* IE9 Compat Modes */ + src: url('../fonts/source-sans-pro-v13-latin-600.eot'); /* IE9 Compat Modes */ src: local('Source Sans Pro SemiBold'), local('SourceSansPro-SemiBold'), - url('/static/assets/fonts/source-sans-pro-v13-latin-600.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('/static/assets/fonts/source-sans-pro-v13-latin-600.woff2') format('woff2'), /* Super Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-600.woff') format('woff'), /* Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-600.ttf') format('truetype'), /* Safari, Android, iOS */ - url('/static/assets/fonts/source-sans-pro-v13-latin-600.svg#SourceSansPro') format('svg'); /* Legacy iOS */ + url('../fonts/source-sans-pro-v13-latin-600.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('../fonts/source-sans-pro-v13-latin-600.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-600.woff') format('woff'), /* Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-600.ttf') format('truetype'), /* Safari, Android, iOS */ + url('../fonts/source-sans-pro-v13-latin-600.svg#SourceSansPro') format('svg'); /* Legacy iOS */ } /* source-sans-pro-600italic - latin */ @font-face { font-family: 'Source Sans Pro'; font-style: italic; font-weight: 600; - src: url('/static/assets/fonts/source-sans-pro-v13-latin-600italic.eot'); /* IE9 Compat Modes */ + src: url('../fonts/source-sans-pro-v13-latin-600italic.eot'); /* IE9 Compat Modes */ src: local('Source Sans Pro SemiBold Italic'), 
local('SourceSansPro-SemiBoldItalic'), - url('/static/assets/fonts/source-sans-pro-v13-latin-600italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('/static/assets/fonts/source-sans-pro-v13-latin-600italic.woff2') format('woff2'), /* Super Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-600italic.woff') format('woff'), /* Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-600italic.ttf') format('truetype'), /* Safari, Android, iOS */ - url('/static/assets/fonts/source-sans-pro-v13-latin-600italic.svg#SourceSansPro') format('svg'); /* Legacy iOS */ + url('../fonts/source-sans-pro-v13-latin-600italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('../fonts/source-sans-pro-v13-latin-600italic.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-600italic.woff') format('woff'), /* Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-600italic.ttf') format('truetype'), /* Safari, Android, iOS */ + url('../fonts/source-sans-pro-v13-latin-600italic.svg#SourceSansPro') format('svg'); /* Legacy iOS */ } /* source-sans-pro-700 - latin */ @font-face { font-family: 'Source Sans Pro'; font-style: normal; font-weight: 700; - src: url('/static/assets/fonts/source-sans-pro-v13-latin-700.eot'); /* IE9 Compat Modes */ + src: url('../fonts/source-sans-pro-v13-latin-700.eot'); /* IE9 Compat Modes */ src: local('Source Sans Pro Bold'), local('SourceSansPro-Bold'), - url('/static/assets/fonts/source-sans-pro-v13-latin-700.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ - url('/static/assets/fonts/source-sans-pro-v13-latin-700.woff2') format('woff2'), /* Super Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-700.woff') format('woff'), /* Modern Browsers */ - url('/static/assets/fonts/source-sans-pro-v13-latin-700.ttf') format('truetype'), /* Safari, Android, iOS */ - 
url('/static/assets/fonts/source-sans-pro-v13-latin-700.svg#SourceSansPro') format('svg'); /* Legacy iOS */ + url('../fonts/source-sans-pro-v13-latin-700.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('../fonts/source-sans-pro-v13-latin-700.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-700.woff') format('woff'), /* Modern Browsers */ + url('../fonts/source-sans-pro-v13-latin-700.ttf') format('truetype'), /* Safari, Android, iOS */ + url('../fonts/source-sans-pro-v13-latin-700.svg#SourceSansPro') format('svg'); /* Legacy iOS */ } \ No newline at end of file diff --git a/powerdnsadmin/static/custom/css/custom.css b/powerdnsadmin/static/custom/css/custom.css index f5dcb343f..7c41c9578 100644 --- a/powerdnsadmin/static/custom/css/custom.css +++ b/powerdnsadmin/static/custom/css/custom.css @@ -42,15 +42,41 @@ table td { background-position: center; } -.navbar-nav>.user-menu>.dropdown-menu>li.user-header>img.img-circle.offline { - filter: brightness(0); - border-color: black; +.search-input { + width: 100%; } -.navbar-nav>.user-menu .user-image.offline { - filter: brightness(0); +.sidebar .image { padding-top: 0.7em; } +.sidebar .info { color: #fff; } +.sidebar .info p { margin: 0; } +.sidebar .info a { font-size: 0.8em; } + +/* Global Styles */ +table.records thead th, table.records tbody td { text-align: center; vertical-align: middle; } +table.records thead th:last-of-type { width: 50px; } +div.records > div.dataTables_wrapper > div.row:first-of-type { margin: 0 0.5em 0 0.5em; } +div.records > div.dataTables_wrapper > div.row:last-of-type { margin: 0.4em 0.5em 0.4em 0.5em; } +div.records > div.dataTables_wrapper table.dataTable { margin: 0 !important; } + +.diff { + font-family: monospace; + padding: 0 0.2em; +} +.diff::before { + content: "\00a0"; + padding-right: 0.1em; } -.search-input { - width: 100%; -} \ No newline at end of file +.diff-deletion { + background-color: lightcoral; +} 
+.diff-deletion::before { + content: "-"; +} + +.diff-addition { + background-color: lightgreen; +} +.diff-addition::before { + content: "+"; +} diff --git a/powerdnsadmin/static/custom/js/app-authentication-settings-editor.js b/powerdnsadmin/static/custom/js/app-authentication-settings-editor.js new file mode 100644 index 000000000..da273b76b --- /dev/null +++ b/powerdnsadmin/static/custom/js/app-authentication-settings-editor.js @@ -0,0 +1,801 @@ +let AuthenticationSettingsModel = function (user_data, api_url, csrf_token, selector) { + let self = this; + let target = null; + self.api_url = api_url; + self.csrf_token = csrf_token; + self.selector = selector; + self.loading = false; + self.saving = false; + self.saved = false; + self.save_failed = false; + self.messages = []; + self.messages_class = 'info'; + self.tab_active = ''; + self.tab_default = 'local'; + + let defaults = { + // Local Authentication Settings + local_db_enabled: true, + signup_enabled: true, + pwd_enforce_characters: 0, + pwd_min_len: 10, + pwd_min_lowercase: 3, + pwd_min_uppercase: 2, + pwd_min_digits: 2, + pwd_min_special: 1, + pwd_enforce_complexity: 0, + pwd_min_complexity: 11, + + // LDAP Authentication Settings + ldap_enabled: false, + ldap_type: 'ldap', + ldap_uri: '', + ldap_base_dn: '', + ldap_admin_username: '', + ldap_admin_password: '', + ldap_domain: '', + ldap_filter_basic: '', + ldap_filter_username: '', + ldap_filter_group: '', + ldap_filter_groupname: '', + ldap_sg_enabled: false, + ldap_admin_group: '', + ldap_operator_group: '', + ldap_user_group: '', + autoprovisioning: false, + autoprovisioning_attribute: '', + urn_value: '', + purge: 0, + + // Google OAuth2 Settings + google_oauth_enabled: false, + google_oauth_client_id: '', + google_oauth_client_secret: '', + google_oauth_scope: '', + google_base_url: '', + google_oauth_auto_configure: true, + google_oauth_metadata_url: '', + google_token_url: '', + google_authorize_url: '', + + // GitHub OAuth2 Settings + 
github_oauth_enabled: false, + github_oauth_key: '', + github_oauth_secret: '', + github_oauth_scope: '', + github_oauth_api_url: '', + github_oauth_auto_configure: false, + github_oauth_metadata_url: '', + github_oauth_token_url: '', + github_oauth_authorize_url: '', + + // Azure AD OAuth2 Settings + azure_oauth_enabled: false, + azure_oauth_key: '', + azure_oauth_secret: '', + azure_oauth_scope: '', + azure_oauth_api_url: '', + azure_oauth_auto_configure: true, + azure_oauth_metadata_url: '', + azure_oauth_token_url: '', + azure_oauth_authorize_url: '', + azure_sg_enabled: false, + azure_admin_group: '', + azure_operator_group: '', + azure_user_group: '', + azure_group_accounts_enabled: false, + azure_group_accounts_name: '', + azure_group_accounts_name_re: '', + azure_group_accounts_description: '', + azure_group_accounts_description_re: '', + + // OIDC OAuth2 Settings + oidc_oauth_enabled: false, + oidc_oauth_key: '', + oidc_oauth_secret: '', + oidc_oauth_scope: '', + oidc_oauth_api_url: '', + oidc_oauth_auto_configure: true, + oidc_oauth_metadata_url: '', + oidc_oauth_token_url: '', + oidc_oauth_authorize_url: '', + oidc_oauth_logout_url: '', + oidc_oauth_username: '', + oidc_oauth_email: '', + oidc_oauth_firstname: '', + oidc_oauth_last_name: '', + oidc_oauth_account_name_property: '', + oidc_oauth_account_description_property: '', + } + + self.init = function (autoload) { + self.loading = ko.observable(self.loading); + self.saving = ko.observable(self.saving); + self.saved = ko.observable(self.saved); + self.save_failed = ko.observable(self.save_failed); + self.messages = ko.observableArray(self.messages); + self.messages_class = ko.observable(self.messages_class); + self.tab_active = ko.observable(self.tab_active); + self.tab_default = ko.observable(self.tab_default); + self.update(user_data); + + let el = null; + if (typeof selector !== 'undefined') { + el = $(selector) + } + + if (el !== null && el.length > 0) { + target = el; + ko.applyBindings(self, 
el[0]); + } else { + ko.applyBindings(self); + } + + if (self.hasHash()) { + self.activateTab(self.getHash()); + } else { + self.activateDefaultTab(); + } + + self.setupListeners(); + self.setupValidation(); + + if (autoload) { + self.load(); + } + } + + self.load = function () { + self.loading(true); + $.ajax({ + url: self.api_url, + type: 'POST', + data: {_csrf_token: csrf_token}, + dataType: 'json', + success: self.onDataLoaded + }); + } + + self.save = function () { + if (!target.valid()) { + return false; + } + self.saving(true); + $.ajax({ + url: self.api_url, + type: 'POST', + data: {_csrf_token: csrf_token, commit: 1, data: ko.toJSON(self)}, + dataType: 'json', + success: self.onDataSaved + }); + } + + self.update = function (instance) { + for (const [key, value] of Object.entries($.extend(defaults, instance))) { + if (ko.isObservable(self[key])) { + self[key](value); + } else { + self[key] = ko.observable(value); + } + } + } + + self.setupListeners = function () { + if ('onhashchange' in window) { + $(window).bind('hashchange', self.onHashChange); + } + } + + self.destroyListeners = function () { + if ('onhashchange' in window) { + $(window).unbind('hashchange', self.onHashChange); + } + } + + self.setupValidation = function () { + let uuidRegExp = /^([0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12})|[0-9]+$/i; + + let footerErrorElements = [ + 'input#local_db_enabled', + ]; + + let errorCheckSelectors = [ + 'input.error:not([disabled])', + 'select.error:not([disabled])', + 'textarea.error:not([disabled])', + ]; + + let errorCheckQuery = errorCheckSelectors.join(','); + let tabs = target.find('.tab-content > *[data-tab]') + + let onElementChanged = function (event) { + target.valid(); + } + + let auth_enabled = function (value, element, params) { + let enabled = 0; + if (self.local_db_enabled()) { + enabled++; + } + if (self.ldap_enabled()) { + enabled++; + } + if (self.google_oauth_enabled()) { + enabled++; + } + if 
(self.github_oauth_enabled()) { + enabled++; + } + if (self.azure_oauth_enabled()) { + enabled++; + } + if (self.oidc_oauth_enabled()) { + enabled++; + } + return enabled > 0; + }; + + let ldap_exclusive = function (value, element, params) { + let enabled = 0; + if (self.ldap_sg_enabled() === 1) { + enabled++; + } + if (self.autoprovisioning() === 1) { + enabled++; + } + return enabled < 2; + } + + let uuid = function (value, element, params) { + return uuidRegExp.test(value); + } + + let local_enabled = function (element) { + return self.local_db_enabled(); + }; + + let ldap_enabled = function (element) { + return self.ldap_enabled(); + }; + + let google_oauth_enabled = function (element) { + return self.google_oauth_enabled(); + }; + + let github_oauth_enabled = function (element) { + return self.github_oauth_enabled(); + }; + + let azure_oauth_enabled = function (element) { + return self.azure_oauth_enabled(); + }; + + let oidc_oauth_enabled = function (element) { + return self.oidc_oauth_enabled(); + }; + + let enforce_characters = function (element) { + return self.local_db_enabled() === 1 && self.pwd_enforce_characters() === 1; + }; + + let enforce_complexity = function (element) { + return self.local_db_enabled() === 1 && self.pwd_enforce_complexity() === 1; + }; + + let ldap_type_openldap = function (element) { + return self.ldap_enabled() && self.ldap_type() === 'ldap'; + }; + + let ldap_type_ad = function (element) { + return self.ldap_enabled() && self.ldap_type() === 'ad'; + }; + + let ldap_sg_enabled = function (element) { + return self.ldap_enabled() === 1 && self.ldap_sg_enabled() === 1; + } + + let ldap_ap_enabled = function (element) { + return self.ldap_enabled() === 1 && self.autoprovisioning() === 1; + } + + let azure_gs_enabled = function (element) { + return self.azure_oauth_enabled() === 1 && self.azure_sg_enabled() === 1; + } + + let azure_gas_enabled = function (element) { + return self.azure_oauth_enabled() && 
self.azure_group_accounts_enabled(); + } + + let google_oauth_auto_configure_enabled = function (element) { + return self.google_oauth_enabled() && self.google_oauth_auto_configure(); + } + + let google_oauth_auto_configure_disabled = function (element) { + return self.google_oauth_enabled() && !self.google_oauth_auto_configure(); + } + + let github_oauth_auto_configure_enabled = function (element) { + return self.github_oauth_enabled() && self.github_oauth_auto_configure(); + } + + let github_oauth_auto_configure_disabled = function (element) { + return self.github_oauth_enabled() && !self.github_oauth_auto_configure(); + } + + let azure_oauth_auto_configure_enabled = function (element) { + return self.azure_oauth_enabled() && self.azure_oauth_auto_configure(); + } + + let azure_oauth_auto_configure_disabled = function (element) { + return self.azure_oauth_enabled() && !self.azure_oauth_auto_configure(); + } + + let oidc_oauth_auto_configure_enabled = function (element) { + return self.oidc_oauth_enabled() && self.oidc_oauth_auto_configure(); + } + + let oidc_oauth_auto_configure_disabled = function (element) { + return self.oidc_oauth_enabled() && !self.oidc_oauth_auto_configure(); + } + + jQuery.validator.addMethod('auth_enabled', auth_enabled, 'At least one authentication method must be enabled.'); + jQuery.validator.addMethod('ldap_exclusive', ldap_exclusive, 'The LDAP group security and role auto-provisioning features are mutually exclusive.'); + jQuery.validator.addMethod('uuid', uuid, 'A valid UUID is required.'); + + target.validate({ + ignore: '', + errorPlacement: function (error, element) { + let useFooter = false; + for (let i = 0; i < footerErrorElements.length; i++) { + if (element.is(footerErrorElements[i])) { + useFooter = true; + } + } + if (useFooter) { + target.find('.card-footer > .error').append(error); + } else if (element.is('input[type=radio]')) { + error.insertAfter(element.parents('div.radio')); + } else { + element.after(error); + } + }, 
+ showErrors: function (errorMap, errorList) { + this.defaultShowErrors(); + tabs.each(function (index, tab) { + tab = $(tab); + let tabId = tab.data('tab'); + let tabLink = target.find('.nav-tabs > li > a[data-tab="' + tabId + '"]'); + if (tab.find(errorCheckQuery).length > 0) { + tabLink.addClass('error'); + } else { + tabLink.removeClass('error'); + } + }); + }, + rules: { + local_db_enabled: 'auth_enabled', + ldap_enabled: 'auth_enabled', + google_oauth_enabled: 'auth_enabled', + github_oauth_enabled: 'auth_enabled', + azure_oauth_enabled: 'auth_enabled', + oidc_oauth_enabled: 'auth_enabled', + pwd_min_len: { + required: enforce_characters, + digits: true, + min: 1, + max: 64, + }, + pwd_min_lowercase: { + required: enforce_characters, + digits: true, + min: 0, + max: 64, + }, + pwd_min_uppercase: { + required: enforce_characters, + digits: true, + min: 0, + max: 64, + }, + pwd_min_digits: { + required: enforce_characters, + digits: true, + min: 0, + max: 64, + }, + pwd_min_special: { + required: enforce_characters, + digits: true, + min: 0, + max: 64, + }, + pwd_min_complexity: { + required: enforce_complexity, + digits: true, + min: 1, + max: 1000, + }, + ldap_type: ldap_enabled, + ldap_uri: { + required: ldap_enabled, + minlength: 11, + maxlength: 255, + }, + ldap_base_dn: { + required: ldap_enabled, + minlength: 4, + maxlength: 255, + }, + ldap_admin_username: { + required: ldap_type_openldap, + minlength: 4, + maxlength: 255, + }, + ldap_admin_password: { + required: ldap_type_openldap, + minlength: 1, + maxlength: 255, + }, + ldap_domain: { + required: ldap_type_ad, + minlength: 1, + maxlength: 255, + }, + ldap_filter_basic: { + required: ldap_enabled, + minlength: 3, + maxlength: 1000, + }, + ldap_filter_username: { + required: ldap_enabled, + minlength: 1, + maxlength: 100, + }, + ldap_filter_group: { + required: ldap_type_openldap, + minlength: 3, + maxlength: 100, + }, + ldap_filter_groupname: { + required: ldap_type_openldap, + minlength: 1, + 
maxlength: 100, + }, + ldap_sg_enabled: { + required: ldap_enabled, + ldap_exclusive: true, + }, + ldap_admin_group: { + required: ldap_sg_enabled, + minlength: 3, + maxlength: 100, + }, + ldap_operator_group: { + required: ldap_sg_enabled, + minlength: 3, + maxlength: 100, + }, + ldap_user_group: { + required: ldap_sg_enabled, + minlength: 3, + maxlength: 100, + }, + autoprovisioning: { + required: ldap_enabled, + ldap_exclusive: true, + }, + autoprovisioning_attribute: { + required: ldap_ap_enabled, + minlength: 1, + maxlength: 100, + }, + urn_value: { + required: ldap_ap_enabled, + minlength: 1, + maxlength: 100, + }, + purge: ldap_enabled, + google_oauth_client_id: { + required: google_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + google_oauth_client_secret: { + required: google_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + google_oauth_scope: { + required: google_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + google_base_url: { + required: google_oauth_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + google_oauth_metadata_url: { + required: google_oauth_auto_configure_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + google_token_url: { + required: google_oauth_auto_configure_disabled, + minlength: 1, + maxlength: 255, + url: true, + }, + google_authorize_url: { + required: google_oauth_auto_configure_disabled, + minlength: 1, + maxlength: 255, + url: true, + }, + github_oauth_key: { + required: github_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + github_oauth_secret: { + required: github_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + github_oauth_scope: { + required: github_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + github_oauth_api_url: { + required: github_oauth_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + github_oauth_metadata_url: { + required: github_oauth_auto_configure_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + 
github_oauth_token_url: { + required: github_oauth_auto_configure_disabled, + minlength: 1, + maxlength: 255, + url: true, + }, + github_oauth_authorize_url: { + required: github_oauth_auto_configure_disabled, + minlength: 1, + maxlength: 255, + url: true, + }, + azure_oauth_key: { + required: azure_oauth_enabled, + minlength: 1, + maxlength: 255, + uuid: true, + }, + azure_oauth_secret: { + required: azure_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + azure_oauth_scope: { + required: azure_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + azure_oauth_api_url: { + required: azure_oauth_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + azure_oauth_metadata_url: { + required: azure_oauth_auto_configure_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + azure_oauth_token_url: { + required: azure_oauth_auto_configure_disabled, + minlength: 1, + maxlength: 255, + url: true, + }, + azure_oauth_authorize_url: { + required: azure_oauth_auto_configure_disabled, + minlength: 1, + maxlength: 255, + url: true, + }, + azure_sg_enabled: azure_oauth_enabled, + azure_admin_group: { + uuid: azure_gs_enabled, + }, + azure_operator_group: { + uuid: azure_gs_enabled, + }, + azure_user_group: { + uuid: azure_gs_enabled, + }, + azure_group_accounts_enabled: azure_oauth_enabled, + azure_group_accounts_name: { + required: azure_gas_enabled, + minlength: 1, + maxlength: 255, + }, + azure_group_accounts_name_re: { + required: azure_gas_enabled, + minlength: 1, + maxlength: 255, + }, + azure_group_accounts_description: { + required: azure_gas_enabled, + minlength: 1, + maxlength: 255, + }, + azure_group_accounts_description_re: { + required: azure_gas_enabled, + minlength: 1, + maxlength: 255, + }, + oidc_oauth_key: { + required: oidc_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + oidc_oauth_secret: { + required: oidc_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + oidc_oauth_scope: { + required: oidc_oauth_enabled, + minlength: 1, + 
maxlength: 255, + }, + oidc_oauth_api_url: { + required: oidc_oauth_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + oidc_oauth_metadata_url: { + required: oidc_oauth_auto_configure_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + oidc_oauth_token_url: { + required: oidc_oauth_auto_configure_disabled, + minlength: 1, + maxlength: 255, + url: true, + }, + oidc_oauth_authorize_url: { + required: oidc_oauth_auto_configure_disabled, + minlength: 1, + maxlength: 255, + url: true, + }, + oidc_oauth_logout_url: { + required: oidc_oauth_enabled, + minlength: 1, + maxlength: 255, + url: true, + }, + oidc_oauth_username: { + required: oidc_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + oidc_oauth_email: { + required: oidc_oauth_enabled, + minlength: 1, + maxlength: 255, + }, + oidc_oauth_firstname: { + minlength: 0, + maxlength: 255, + }, + oidc_oauth_last_name: { + minlength: 0, + maxlength: 255, + }, + oidc_oauth_account_name_property: { + minlength: 0, + maxlength: 255, + }, + oidc_oauth_account_description_property: { + minlength: 0, + maxlength: 255, + }, + }, + messages: { + ldap_sg_enabled: { + ldap_exclusive: 'The LDAP group security feature is mutually exclusive with the LDAP role auto-provisioning feature.', + }, + autoprovisioning: { + ldap_exclusive: 'The LDAP role auto-provisioning feature is mutually exclusive with the LDAP group security feature.', + }, + }, + }); + + target.find('input, select, textarea, label').on('change,keyup,blur,click', onElementChanged); + target.valid(); + } + + self.activateTab = function (tab) { + $('[role="tablist"] a.nav-link').blur(); + self.tab_active(tab); + window.location.hash = tab; + } + + self.activateDefaultTab = function () { + self.activateTab(self.tab_default()); + } + + self.getHash = function () { + return window.location.hash.substring(1); + } + + self.hasHash = function () { + return window.location.hash.length > 1; + } + + self.onDataLoaded = function (result) { + if 
(result.status == 0) { + self.messages_class('danger'); + self.messages(result.messages); + self.loading(false); + return false; + } + + self.update(result.data); + self.messages_class('info'); + self.messages(result.messages); + self.loading(false); + } + + self.onDataSaved = function (result) { + if (result.status == 0) { + self.saved(false); + self.save_failed(true); + self.messages_class('danger'); + self.messages(result.messages); + self.saving(false); + return false; + } + + self.update(result.data); + self.saved(true); + self.save_failed(false); + self.messages_class('info'); + self.messages(result.messages); + self.saving(false); + } + + self.onHashChange = function (event) { + let hash = window.location.hash.trim(); + if (hash.length > 1) { + self.activateTab(hash.substring(1)); + } else { + self.activateDefaultTab(); + } + } + + self.onSaveClick = function (model, event) { + self.save(); + return false; + } + + self.onTabClick = function (model, event) { + self.activateTab($(event.target).data('tab')); + return false; + } +} diff --git a/powerdnsadmin/static/custom/js/custom.js b/powerdnsadmin/static/custom/js/custom.js index 051a2e038..e4890d94f 100644 --- a/powerdnsadmin/static/custom/js/custom.js +++ b/powerdnsadmin/static/custom/js/custom.js @@ -12,13 +12,7 @@ function applyChanges(data, url, showResult, refreshPage) { console.log("Applied changes successfully."); console.log(data); if (showResult) { - var modal = $("#modal_success"); - if (data['msg']) { - modal.find('.modal-body p').text(data['msg']); - } else { - modal.find('.modal-body p').text("Applied changes successfully"); - } - modal.modal('show'); + showSuccessModal(data['msg'] || "Applied changes successfully"); } if (refreshPage) { location.reload(true); @@ -27,10 +21,8 @@ function applyChanges(data, url, showResult, refreshPage) { error : function(jqXHR, status) { console.log(jqXHR); - var modal = $("#modal_error"); var responseJson = jQuery.parseJSON(jqXHR.responseText); - 
modal.find('.modal-body p').text(responseJson['msg']); - modal.modal('show'); + showErrorModal(responseJson['msg']); } }); } @@ -38,30 +30,26 @@ function applyChanges(data, url, showResult, refreshPage) { function applyRecordChanges(data, domain) { $.ajax({ type : "POST", - url : $SCRIPT_ROOT + '/domain/' + domain + '/apply', + url : $SCRIPT_ROOT + '/domain/' + encodeURIComponent(domain) + '/apply', data : JSON.stringify(data),// now data come in this function contentType : "application/json; charset=utf-8", crossDomain : true, dataType : "json", success : function(data, status, jqXHR) { // update Apply button value - $.getJSON($SCRIPT_ROOT + '/domain/' + domain + '/info', function(data) { + $.getJSON($SCRIPT_ROOT + '/domain/' + encodeURIComponent(domain) + '/info', function(data) { $(".button_apply_changes").val(data['serial']); }); console.log("Applied changes successfully.") - var modal = $("#modal_success"); - modal.find('.modal-body p').text("Applied changes successfully"); - modal.modal('show'); + showSuccessModal("Applied changes successfully"); setTimeout(() => {window.location.reload()}, 2000); }, error : function(jqXHR, status) { console.log(jqXHR); - var modal = $("#modal_error"); var responseJson = jQuery.parseJSON(jqXHR.responseText); - modal.find('.modal-body p').text(responseJson['msg']); - modal.modal('show'); + showErrorModal(responseJson['msg']); } }); } @@ -105,8 +93,8 @@ function saveRow(oTable, nRow) { oTable.cell(nRow,5).data(jqInputs[2].value); var record = jqInputs[0].value; - var button_edit = "" - var button_delete = "" + var button_edit = "" + var button_delete = "" oTable.cell(nRow,6).data(button_edit); oTable.cell(nRow,7).data(button_delete); @@ -154,8 +142,8 @@ function editRow(oTable, nRow) { jqTds[3].innerHTML = ''; jqTds[4].innerHTML = ''; jqTds[5].innerHTML = ''; - jqTds[6].innerHTML = ''; - jqTds[7].innerHTML = ''; + jqTds[6].innerHTML = ''; + jqTds[7].innerHTML = ''; // set current value of dropdown column if (aData[2] == 
'Active'){ @@ -204,12 +192,12 @@ function getdnssec(url, domain){ if (dnssec.length == 0 && parseFloat(PDNS_VERSION) >= 4.1) { dnssec_msg = '

DNSSEC is disabled. Click on Enable to activate it.'; modal.find('.modal-body p').html(dnssec_msg); - dnssec_footer = ''; + dnssec_footer = ''; modal.find('.modal-footer ').html(dnssec_footer); } else { if (parseFloat(PDNS_VERSION) >= 4.1) { - dnssec_footer = ''; + dnssec_footer = ''; modal.find('.modal-footer ').html(dnssec_footer); } for (var i = 0; i < dnssec.length; i++) { @@ -299,4 +287,17 @@ function copy_otp_secret_to_clipboard() { navigator.clipboard.writeText(copyBox.value); $("#copy_tooltip").css("visibility", "visible"); setTimeout(function(){ $("#copy_tooltip").css("visibility", "collapse"); }, 2000); - } \ No newline at end of file + } + +// Side menu nav bar active selection +/** add active class and stay opened when selected */ + +// for sidebar menu entirely but not cover treeview +$('ul.nav-sidebar a').filter(function() { + return this.href == window.location.href.split('?')[0]; +}).addClass('active'); + +// for treeview +$('ul.nav-treeview a').filter(function() { + return this.href == window.location.href.split('?')[0]; +}).parentsUntil(".nav-sidebar > .nav-treeview").addClass('menu-open').prev('a').addClass('active'); diff --git a/powerdnsadmin/static/img/gravatar.png b/powerdnsadmin/static/img/gravatar.png deleted file mode 100644 index 83602867a..000000000 Binary files a/powerdnsadmin/static/img/gravatar.png and /dev/null differ diff --git a/powerdnsadmin/static/img/user_image.png b/powerdnsadmin/static/img/user_image.png new file mode 100644 index 000000000..325b0ef85 Binary files /dev/null and b/powerdnsadmin/static/img/user_image.png differ diff --git a/powerdnsadmin/swagger-spec.yaml b/powerdnsadmin/swagger-spec.yaml index dbf484ece..75b1d9f00 100644 --- a/powerdnsadmin/swagger-spec.yaml +++ b/powerdnsadmin/swagger-spec.yaml @@ -782,6 +782,32 @@ paths: '422': description: 'Returned when something is wrong with the content of the request. 
Contains an error message' + '/servers/{server_id}/health': + get: + security: + - APIKeyHeader: [] + summary: Perfoms health check + operationId: health_check + tags: + - Monitoring + parameters: + - name: server_id + in: path + required: true + description: The id of the server to retrieve + type: string + responses: + '200': + description: Healthcheck succeeded + schema: + type: string + example: "up" + '503': + description: Healthcheck failed + schema: + type: string + example: Down/Unknown + '/pdnsadmin/zones': get: security: diff --git a/powerdnsadmin/templates/admin_edit_account.html b/powerdnsadmin/templates/admin_edit_account.html index 41f2d430c..827a3e089 100644 --- a/powerdnsadmin/templates/admin_edit_account.html +++ b/powerdnsadmin/templates/admin_edit_account.html @@ -1,165 +1,247 @@ {% extends "base.html" %} {% set active_page = "admin_accounts" %} -{% block title %}Edit Account - {{ SITE_NAME }}{% endblock %} +{% if create %} + {% set action_label = 'Create' %} + {% set form_action = url_for('admin.edit_account') %} +{% else %} + {% set action_label = 'Edit' %} + {% set form_action = url_for('admin.edit_account', account_name=account.name) %} +{% endif %} +{% block title %}{{ action_label }} Account - {{ SITE_NAME }}{% endblock %} {% block dashboard_stat %} - -
-

- Account - {% if create %}New account{% else %}{{ account.name }}{% endif %} -

- -
+
+
+
+
+

{{ action_label }} Account

+
+
+ +
+
+
+
{% endblock %} {% block content %} -
-
-
-
-
-

{% if create %}Add{% else %}Edit{% endif %} account

-
- - -
- - -
- {% if error %} -
- -

Error!

- {{ error }} -
- {{ error }} - {% endif %} -
- - - - {% if invalid_accountname %} - Cannot be blank and must only contain alphanumeric - characters. - {% elif duplicate_accountname %} - Account name already in use. - {% endif %} -
-
- - - +
+
+
+
+ + + +
+
+

Account Editor

+
+ +
+ {% if error %} +
+ +

Error!

+ {{ error }} +
+ {{ error }} + {% endif %} +
+ + + + {% if invalid_accountname %} + Cannot be blank and must only contain alphanumeric + characters{% if SETTING.get('account_name_extra_chars') %}, dots, hyphens or underscores{% endif %}. + + {% elif duplicate_accountname %} + Account name already in use. + {% endif %} +
+
+ + + +
+
+ + + +
+
+ + + +
+
+ +
+

Access Control

+
+ +
+

Users on the right have access to manage records in all zones + associated with the account. +

+

Click on users to move between columns.

+
+ +
+
+ +
+

Zones on the right are associated with the account. Red marked zone names are + already associated with other accounts. + Moving already associated zones to this account will overwrite the previous + associated account. +

+

Hover over the red zone names to show the associated account. Click on zones to + move between columns.

+
+ +
+
+ + +
-
- - - -
-
- - - + + +
+ + +
+
+
+

Account Editor Help

-
-
-

Access Control

-
-
-

Users on the right have access to manage records in all domains - associated with the account.

-

Click on users to move between columns.

-
- + +
+

+ An account allows grouping of zones belonging to a particular entity, such as a + customer or + department. +

+

+ A zone can be assigned to an account upon zone creation or through the zone + administration + page. +

+

Fill in all the fields to the in the form to the left.

+

+ Name is an account identifier. It will be lowercase and can contain + alphanumeric + characters{% if SETTING.get('account_name_extra_chars') %}, dots, hyphens and + underscores (no space or other special character is allowed) + {% else %} (no extra character is allowed){% endif %}.
+ Description is a user-friendly name for this account.
+ Contact person is the name of a contact person at the account.
+ Mail Address is an e-mail address for the contact person. +

+
- - -
-
-
-
-
-

Help with creating a new account

-
-
-

- An account allows grouping of domains belonging to a particular entity, such as a customer or - department.
- A domain can be assigned to an account upon domain creation or through the domain administration - page. -

-

Fill in all the fields to the in the form to the left.

-

- Name is an account identifier. It will be stored as all lowercase letters (no - spaces, special characters etc).
- Description is a user friendly name for this account.
- Contact person is the name of a contact person at the account.
- Mail Address is an e-mail address for the contact person. -

+
+
+
-
-
+ +
{% endblock %} + {% block extrascripts %} - + + addMultiSelect("#account_multi_user", "Username") + addMultiSelect("#account_domains", "Zone") + {% endblock %} diff --git a/powerdnsadmin/templates/admin_edit_key.html b/powerdnsadmin/templates/admin_edit_key.html index 7c8ca17d7..811854968 100644 --- a/powerdnsadmin/templates/admin_edit_key.html +++ b/powerdnsadmin/templates/admin_edit_key.html @@ -1,283 +1,338 @@ {% extends "base.html" %} {% set active_page = "admin_keys" %} -{% if create or (key is not none and key.role.name != "User") %}{% set hide_opts = True %}{%else %}{% set hide_opts = False %}{% endif %} -{% block title %} -Edit Key - {{ SITE_NAME }} -{% endblock %} +{% if create %} + {% set action_label = 'Create' %} + {% set form_action = url_for('admin.edit_key') %} +{% else %} + {% set action_label = 'Edit' %} + {% set form_action = url_for('admin.edit_key', key_id=key.id) %} +{% endif %} +{% if (key is not none and key.role.name != "User") %}{% set hide_opts = True %}{% else %} + {% set hide_opts = False %}{% endif %} +{% block title %}{{ action_label }} API Key - {{ SITE_NAME }}{% endblock %} + {% block dashboard_stat %} - -
-

- Key - {% if create %}New key{% else %}{{ key.id }}{% endif %} -

- -
+
+
+
+
+

{{ action_label }} API Key

+
+
+ +
+
+
+
{% endblock %} {% block content %} -
-
-
-
-
-

{% if create %}Add{% else %}Edit{% endif %} key

+
+
+
+
+
+ + +
+
+

API Key Editor

+
+ +
+
+ + +
+ +
+ + + +
+ +
+ + + + + + + + + + + +
+ +
- - -
- - -
-
- - + + +
+
+
+

API Key Editor Help

-
- - + +
+

Fill in all the fields in the form to the left.

+

Role The role of the key.

+

Description The key description.

+

Access Control The zones or accounts which the key has access to.

+
- - - - - - -
-
-
-
-
-

Help with {% if create %}creating a new{% else%}updating a{% endif %} key -

-
-
-

Fill in all the fields in the form to the left.

-

Role The role of the key.

-

Description The key description.

-

Access Control The domains or accounts which the key has access to.

+
+
+
-
-
+ +
{% endblock %} + {% block extrascripts %} - + that.qs2 = $selectionSearch.quicksearch(selectionSearchString) + .on('keydown', function (e) { + if (e.which == 40) { + that.$selectionUl.focus(); + return false; + } + }); + }, + afterSelect: function () { + this.qs1.cache(); + this.qs2.cache(); + }, + afterDeselect: function () { + this.qs1.cache(); + this.qs2.cache(); + } + }); + {% if plain_key %} + $(document.body).ready(function () { + var modal = $("#modal_show_key"); + var info = "Please copy this key to a secure location. You will be unable to view it again once you close this window: {{ plain_key }}"; + modal.find('.modal-body p').text(info); + modal.find('#button_key_confirm').click(redirect_modal); + modal.find('#button_close_modal').click(redirect_modal); + modal.modal('show'); + }); + + function redirect_modal() { + window.location.href = '{{ url_for('admin.manage_keys') }}'; + modal.modal('hide'); + } + {% endif %} + {% endblock %} + {% block modals %} -