diff --git a/Makefile b/Makefile index d6655a0e4..5f71c6132 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ django-shell: @CMD="poetry run python manage.py shell" $(MAKE) exec-backend-command dump-data: - @CMD="poetry run python manage.py dumpdata github owasp --indent=2 --output=data/nest.json" $(MAKE) exec-backend-command + @CMD="poetry run python manage.py dumpdata github owasp --indent=2" $(MAKE) exec-backend-command > data/nest.json exec-backend-command: @docker exec -it nest-backend $(CMD) 2>/dev/null @@ -19,8 +19,8 @@ github-sync-owasp-organization: github-sync-related-repositories: @CMD="poetry run python manage.py github_sync_related_repositories" $(MAKE) exec-backend-command -github-summarize-issues: - @CMD="poetry run python manage.py github_summarize_issues" $(MAKE) exec-backend-command +github-enrich-issues: + @CMD="poetry run python manage.py github_enrich_issues" $(MAKE) exec-backend-command index: @CMD="poetry run python manage.py algolia_reindex" $(MAKE) exec-backend-command @@ -63,11 +63,11 @@ sync: \ github-sync-owasp-organization \ owasp-scrape-site-data \ github-sync-related-repositories \ - github-summarize-issues \ + github-enrich-issues \ owasp-aggregate-projects-data test: @docker build -f backend/Dockerfile.test backend -t nest-backend-test 2>/dev/null - @docker run -e DJANGO_CONFIGURATION=Test nest-backend-test poetry run pytest 2>/dev/null + @docker run -e DJANGO_CONFIGURATION=Test nest-backend-test poetry run pytest update: sync index diff --git a/backend/.env/template b/backend/.env/template index 0db91800b..bd4ba1f27 100644 --- a/backend/.env/template +++ b/backend/.env/template @@ -11,4 +11,6 @@ DJANGO_DB_PORT="None" DJANGO_DB_USER="None" DJANGO_OPEN_AI_SECRET_KEY="None" DJANGO_SECRET_KEY="None" +DJANGO_SLACK_APP_TOKEN="None" +DJANGO_SLACK_BOT_TOKEN="None" GITHUB_TOKEN="None" diff --git a/backend/apps/common/open_ai.py b/backend/apps/common/open_ai.py index 558c14203..83ef5c92d 100644 --- a/backend/apps/common/open_ai.py +++ b/backend/apps/common/open_ai.py @@ -11,7 +11,7 @@ class OpenAi: """Open AI communication class.""" - def __init__(self, model="gpt-4o-mini", max_tokens=1000, temperature=0.7, prompt=None): + def __init__(self, model="gpt-4o-mini", max_tokens=1000, temperature=0.7): """OpenAi constructor.""" self.client = openai.OpenAI( api_key=settings.OPEN_AI_SECRET_KEY, @@ -22,18 +22,21 @@ def __init__(self, model="gpt-4o-mini", max_tokens=1000, temperature=0.7, prompt self.model = model self.temperature = temperature - if prompt: - self.set_prompt(prompt) - - def set_prompt(self, content): + def set_input(self, content): """Set system role content.""" - self.prompt = content + self.input = content return self - def set_input(self, content): + def set_max_tokens(self, max_tokens): + """Set max tokens.""" + self.max_tokens = max_tokens + + return self + + def set_prompt(self, content): """Set system role content.""" - self.input = content + self.prompt = content return self diff --git a/backend/apps/common/utils.py b/backend/apps/common/utils.py index 94fbb3383..1d265eb88 100644 --- a/backend/apps/common/utils.py +++ b/backend/apps/common/utils.py @@ -1,5 +1,13 @@ """Common app utils.""" +from django.conf import settings +from django.urls import reverse + + +def get_absolute_url(view_name): + """Return absolute URL for a view.""" + return f"{settings.SITE_URL}{reverse(view_name)}" + def join_values(fields, delimiter=" "): """Join non-empty field values using the delimiter.""" diff --git a/backend/apps/github/admin.py b/backend/apps/github/admin.py 
index f5c8294db..51839dbe8 100644 --- a/backend/apps/github/admin.py +++ b/backend/apps/github/admin.py @@ -3,7 +3,12 @@ from django.contrib import admin from django.utils.safestring import mark_safe -from apps.github.models import Issue, Label, Organization, Release, Repository, User +from apps.github.models.issue import Issue +from apps.github.models.label import Label +from apps.github.models.organization import Organization +from apps.github.models.release import Release +from apps.github.models.repository import Repository +from apps.github.models.user import User class LabelAdmin(admin.ModelAdmin): @@ -26,7 +31,7 @@ class IssueAdmin(admin.ModelAdmin): "state", "is_locked", ) - search_fields = ("title", "body", "summary") + search_fields = ("title",) def custom_field_github_url(self, obj): """Issue GitHub URL.""" diff --git a/backend/apps/github/api/__init__.py b/backend/apps/github/api/__init__.py index dfca495e4..9066aab94 100644 --- a/backend/apps/github/api/__init__.py +++ b/backend/apps/github/api/__init__.py @@ -1,8 +1 @@ """GitHub app API.""" - -from apps.github.api.issue import IssueViewSet -from apps.github.api.label import LabelViewSet -from apps.github.api.organization import OrganizationViewSet -from apps.github.api.release import ReleaseViewSet -from apps.github.api.repository import RepositoryViewSet -from apps.github.api.user import UserViewSet diff --git a/backend/apps/github/api/issue.py b/backend/apps/github/api/issue.py index 92466731a..b957c9e02 100644 --- a/backend/apps/github/api/issue.py +++ b/backend/apps/github/api/issue.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.github.models import Issue +from apps.github.models.issue import Issue # Serializers define the API representation. diff --git a/backend/apps/github/api/label.py b/backend/apps/github/api/label.py index 9ce98276b..dccaea1fe 100644 --- a/backend/apps/github/api/label.py +++ b/backend/apps/github/api/label.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.github.models import Label +from apps.github.models.label import Label # Serializers define the API representation. diff --git a/backend/apps/github/api/organization.py b/backend/apps/github/api/organization.py index b9e07d1a5..eedfc699d 100644 --- a/backend/apps/github/api/organization.py +++ b/backend/apps/github/api/organization.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.github.models import Organization +from apps.github.models.organization import Organization # Serializers define the API representation. diff --git a/backend/apps/github/api/release.py b/backend/apps/github/api/release.py index 958614670..0e99c5183 100644 --- a/backend/apps/github/api/release.py +++ b/backend/apps/github/api/release.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.github.models import Release +from apps.github.models.release import Release # Serializers define the API representation. diff --git a/backend/apps/github/api/repository.py b/backend/apps/github/api/repository.py index fa7d7e2b0..56a8e421b 100644 --- a/backend/apps/github/api/repository.py +++ b/backend/apps/github/api/repository.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.github.models import Repository +from apps.github.models.repository import Repository # Serializers define the API representation. 
diff --git a/backend/apps/github/api/urls.py b/backend/apps/github/api/urls.py index 38162d550..60673d1ae 100644 --- a/backend/apps/github/api/urls.py +++ b/backend/apps/github/api/urls.py @@ -2,14 +2,12 @@ from rest_framework import routers -from apps.github.api import ( - IssueViewSet, - LabelViewSet, - OrganizationViewSet, - ReleaseViewSet, - RepositoryViewSet, - UserViewSet, -) +from apps.github.api.issue import IssueViewSet +from apps.github.api.label import LabelViewSet +from apps.github.api.organization import OrganizationViewSet +from apps.github.api.release import ReleaseViewSet +from apps.github.api.repository import RepositoryViewSet +from apps.github.api.user import UserViewSet router = routers.SimpleRouter() diff --git a/backend/apps/github/api/user.py b/backend/apps/github/api/user.py index 96f18828f..db63097cf 100644 --- a/backend/apps/github/api/user.py +++ b/backend/apps/github/api/user.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.github.models import User +from apps.github.models.user import User # Serializers define the API representation. diff --git a/backend/apps/github/common.py b/backend/apps/github/common.py new file mode 100644 index 000000000..ed4e4d337 --- /dev/null +++ b/backend/apps/github/common.py @@ -0,0 +1,108 @@ +"""GitHub app common module.""" + +import logging + +from github.GithubException import UnknownObjectException + +from apps.github.models.issue import Issue +from apps.github.models.label import Label +from apps.github.models.organization import Organization +from apps.github.models.release import Release +from apps.github.models.repository import Repository +from apps.github.models.user import User +from apps.github.utils import check_owasp_site_repository + +logger = logging.getLogger(__name__) + + +def sync_repository(gh_repository, organization=None, user=None): + """Sync GitHub repository data.""" + entity_key = gh_repository.name.lower() + is_owasp_site_repository = check_owasp_site_repository(entity_key) + + # GitHub repository organization. + if organization is None: + gh_organization = gh_repository.organization + if gh_organization is not None: + organization = Organization.update_data(gh_organization) + + # GitHub repository owner. + if user is None: + user = User.update_data(gh_repository.owner) + + # GitHub repository. + commits = gh_repository.get_commits() + contributors = gh_repository.get_contributors() + languages = None if is_owasp_site_repository else gh_repository.get_languages() + + repository = Repository.update_data( + gh_repository, + commits=commits, + contributors=contributors, + languages=languages, + organization=organization, + user=user, + ) + + # GitHub repository issues. + if not repository.is_archived: + # Sync open issues for the first run. + kwargs = { + "direction": "asc", + "sort": "created", + "state": "open", + } + latest_issue = Issue.objects.filter(repository=repository).order_by("-updated_at").first() + if latest_issue: + # Sync open/closed issues for subsequent runs. + kwargs.update( + { + "since": latest_issue.updated_at, + "state": "all", + } + ) + for gh_issue in gh_repository.get_issues(**kwargs): + # Skip pull requests. + if gh_issue.pull_request: + continue + + # GitHub issue author. + if gh_issue.user is not None: + author = User.update_data(gh_issue.user) + + issue = Issue.update_data(gh_issue, author=author, repository=repository) + + # Assignees. 
+ issue.assignees.clear() + for gh_issue_assignee in gh_issue.assignees: + issue.assignees.add(User.update_data(gh_issue_assignee)) + + # Labels. + issue.labels.clear() + for gh_issue_label in gh_issue.labels: + try: + issue.labels.add(Label.update_data(gh_issue_label)) + except UnknownObjectException: + logger.info("Couldn't get GitHub issue label %s", issue.url) + + # GitHub repository releases. + releases = [] + if not is_owasp_site_repository: + existing_release_node_ids = set( + Release.objects.filter(repository=repository).values_list("node_id", flat=True) + if repository.id + else () + ) + for gh_release in gh_repository.get_releases(): + release_node_id = Release.get_node_id(gh_release) + if release_node_id in existing_release_node_ids: + break + + # GitHub release author. + if gh_release.author is not None: + author = User.update_data(gh_release.author) + + # GitHub release. + releases.append(Release.update_data(gh_release, author=author, repository=repository)) + + return organization, repository, releases diff --git a/backend/apps/github/index/issue.py b/backend/apps/github/index/issue.py index f3fd57b37..06ec9beaa 100644 --- a/backend/apps/github/index/issue.py +++ b/backend/apps/github/index/issue.py @@ -40,6 +40,7 @@ class IssueIndex(AlgoliaIndex): ) settings = { + "attributeForDistinct": "idx_project_name", "minProximity": 4, "indexLanguages": ["en"], "customRanking": [ diff --git a/backend/apps/github/management/commands/github_summarize_issues.py b/backend/apps/github/management/commands/github_enrich_issues.py similarity index 62% rename from backend/apps/github/management/commands/github_summarize_issues.py rename to backend/apps/github/management/commands/github_enrich_issues.py index 2a62bd05e..f8d0fbc2c 100644 --- a/backend/apps/github/management/commands/github_summarize_issues.py +++ b/backend/apps/github/management/commands/github_enrich_issues.py @@ -5,7 +5,7 @@ from django.core.management.base import BaseCommand from apps.common.open_ai import OpenAi -from apps.github.models import Issue +from apps.github.models.issue import Issue logger = logging.getLogger(__name__) @@ -27,22 +27,33 @@ def handle(self, *args, **options): prefix = f"{idx + offset + 1} of {open_issues_count - offset}" print(f"{prefix:<10} {issue.title}") - open_ai.set_prompt( + open_ai.set_input(f"{issue.title}\r\n{issue.body}") + + # Generate summary + open_ai.set_max_tokens(500).set_prompt( ( "Summarize the following GitHub issue using imperative mood." - "Add a good amount of technical details." - "Include possible first steps of tackling the problem." + "Use a good amount of technical details." + "Avoid using lists for description." + "Do not use markdown in output." ) if issue.project.is_code_type or issue.project.is_tool_type else ( "Summarize the following GitHub issue." "Avoid mentioning author's name or issue creation date." - "Add a hint of what needs to be done if possible." + "Avoid using lists for description." + "Do not use markdown in output." ) ) + issue.summary = open_ai.complete() or "" + + # Generate hint + open_ai.set_max_tokens(1000).set_prompt( + "Describe possible first steps of approaching the problem." + ) + issue.hint = open_ai.complete() or "" - issue.summary = open_ai.set_input(f"{issue.title}\r\n{issue.body}").complete() or "" issues.append(issue) # Bulk save data.
- Issue.bulk_save(issues, fields=("summary",)) + Issue.bulk_save(issues, fields=("hint", "summary")) diff --git a/backend/apps/github/management/commands/github_sync_owasp_organization.py b/backend/apps/github/management/commands/github_sync_owasp_organization.py index 6a8d57ace..eb020126c 100644 --- a/backend/apps/github/management/commands/github_sync_owasp_organization.py +++ b/backend/apps/github/management/commands/github_sync_owasp_organization.py @@ -7,10 +7,15 @@ from django.core.management.base import BaseCommand from github.GithubException import BadCredentialsException +from apps.github.common import sync_repository from apps.github.constants import GITHUB_ITEMS_PER_PAGE -from apps.github.models import Release, Repository, sync_repository +from apps.github.models.release import Release +from apps.github.models.repository import Repository from apps.owasp.constants import OWASP_ORGANIZATION_NAME -from apps.owasp.models import Chapter, Committee, Event, Project +from apps.owasp.models.chapter import Chapter +from apps.owasp.models.committee import Committee +from apps.owasp.models.event import Event +from apps.owasp.models.project import Project logger = logging.getLogger(__name__) diff --git a/backend/apps/github/management/commands/github_sync_related_repositories.py b/backend/apps/github/management/commands/github_sync_related_repositories.py index 0880c4cb9..5dca61725 100644 --- a/backend/apps/github/management/commands/github_sync_related_repositories.py +++ b/backend/apps/github/management/commands/github_sync_related_repositories.py @@ -7,10 +7,12 @@ from django.core.management.base import BaseCommand from github.GithubException import UnknownObjectException +from apps.github.common import sync_repository from apps.github.constants import GITHUB_ITEMS_PER_PAGE -from apps.github.models import Issue, Release, sync_repository +from apps.github.models.issue import Issue +from apps.github.models.release import Release from apps.github.utils import get_repository_path -from apps.owasp.models import Project +from apps.owasp.models.project import Project logger = logging.getLogger(__name__) @@ -22,7 +24,7 @@ def add_arguments(self, parser): parser.add_argument("--offset", default=0, required=False, type=int) def handle(self, *args, **options): - active_projects = Project.objects.filter(is_active=True).order_by("created_at") + active_projects = Project.active_projects.order_by("created_at") active_projects_count = active_projects.count() gh = github.Github(os.getenv("GITHUB_TOKEN"), per_page=GITHUB_ITEMS_PER_PAGE) diff --git a/backend/apps/github/migrations/0003_issue_hint_alter_issue_summary.py b/backend/apps/github/migrations/0003_issue_hint_alter_issue_summary.py new file mode 100644 index 000000000..e8ae93f6c --- /dev/null +++ b/backend/apps/github/migrations/0003_issue_hint_alter_issue_summary.py @@ -0,0 +1,22 @@ +# Generated by Django 5.1.1 on 2024-09-14 03:00 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("github", "0002_issue_summary"), + ] + + operations = [ + migrations.AddField( + model_name="issue", + name="hint", + field=models.TextField(default="", verbose_name="Resolution hint"), + ), + migrations.AlterField( + model_name="issue", + name="summary", + field=models.TextField(default="", verbose_name="Summary"), + ), + ] diff --git a/backend/apps/github/migrations/0004_alter_issue_hint.py b/backend/apps/github/migrations/0004_alter_issue_hint.py new file mode 100644 index 000000000..83af14b2c --- /dev/null 
+++ b/backend/apps/github/migrations/0004_alter_issue_hint.py @@ -0,0 +1,17 @@ +# Generated by Django 5.1.1 on 2024-09-14 03:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("github", "0003_issue_hint_alter_issue_summary"), + ] + + operations = [ + migrations.AlterField( + model_name="issue", + name="hint", + field=models.TextField(default="", verbose_name="Hint"), + ), + ] diff --git a/backend/apps/github/migrations/0005_alter_issue_hint_alter_issue_lock_reason_and_more.py b/backend/apps/github/migrations/0005_alter_issue_hint_alter_issue_lock_reason_and_more.py new file mode 100644 index 000000000..d401b0a1d --- /dev/null +++ b/backend/apps/github/migrations/0005_alter_issue_hint_alter_issue_lock_reason_and_more.py @@ -0,0 +1,29 @@ +# Generated by Django 5.1.1 on 2024-09-14 03:04 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("github", "0004_alter_issue_hint"), + ] + + operations = [ + migrations.AlterField( + model_name="issue", + name="hint", + field=models.TextField(blank=True, default="", verbose_name="Hint"), + ), + migrations.AlterField( + model_name="issue", + name="lock_reason", + field=models.CharField( + blank=True, default="", max_length=200, verbose_name="Lock reason" + ), + ), + migrations.AlterField( + model_name="issue", + name="summary", + field=models.TextField(blank=True, default="", verbose_name="Summary"), + ), + ] diff --git a/backend/apps/github/migrations/0006_alter_issue_state_reason.py b/backend/apps/github/migrations/0006_alter_issue_state_reason.py new file mode 100644 index 000000000..6f019d125 --- /dev/null +++ b/backend/apps/github/migrations/0006_alter_issue_state_reason.py @@ -0,0 +1,19 @@ +# Generated by Django 5.1.1 on 2024-09-14 03:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("github", "0005_alter_issue_hint_alter_issue_lock_reason_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="issue", + name="state_reason", + field=models.CharField( + blank=True, default="", max_length=200, verbose_name="State reason" + ), + ), + ] diff --git a/backend/apps/github/models/__init__.py b/backend/apps/github/models/__init__.py index d77406387..8317faf60 100644 --- a/backend/apps/github/models/__init__.py +++ b/backend/apps/github/models/__init__.py @@ -1,108 +1 @@ """Github app.""" - -import logging - -from github.GithubException import UnknownObjectException - -from apps.github.models.issue import Issue -from apps.github.models.label import Label -from apps.github.models.organization import Organization -from apps.github.models.release import Release -from apps.github.models.repository import Repository -from apps.github.models.user import User -from apps.github.utils import check_owasp_site_repository - -logger = logging.getLogger(__name__) - - -def sync_repository(gh_repository, organization=None, user=None): - """Sync GitHub repository data.""" - entity_key = gh_repository.name.lower() - is_owasp_site_repository = check_owasp_site_repository(entity_key) - - # GitHub repository organization. - if organization is None: - gh_organization = gh_repository.organization - if gh_organization is not None: - organization = Organization.update_data(gh_organization) - - # GitHub repository owner. - if user is None: - user = User.update_data(gh_repository.owner) - - # GitHub repository. 
- commits = gh_repository.get_commits() - contributors = gh_repository.get_contributors() - languages = None if is_owasp_site_repository else gh_repository.get_languages() - - repository = Repository.update_data( - gh_repository, - commits=commits, - contributors=contributors, - languages=languages, - organization=organization, - user=user, - ) - - # GitHub repository issues. - if not repository.is_archived: - # Sync open issues for the first run. - kwargs = { - "direction": "asc", - "sort": "created", - "state": "open", - } - latest_issue = Issue.objects.filter(repository=repository).order_by("-updated_at").first() - if latest_issue: - # Sync open/closed issues for subsequent runs. - kwargs.update( - { - "since": latest_issue.updated_at, - "state": "all", - } - ) - for gh_issue in gh_repository.get_issues(**kwargs): - # Skip pull requests. - if gh_issue.pull_request: - continue - - # GitHub issue author. - if gh_issue.user is not None: - author = User.update_data(gh_issue.user) - - issue = Issue.update_data(gh_issue, author=author, repository=repository) - - # Assignees. - issue.assignees.clear() - for gh_issue_assignee in gh_issue.assignees: - issue.assignees.add(User.update_data(gh_issue_assignee)) - - # Labels. - issue.labels.clear() - for gh_issue_label in gh_issue.labels: - try: - issue.labels.add(Label.update_data(gh_issue_label)) - except UnknownObjectException: - logger.info("Couldn't get GitHub issue label %s", issue.url) - - # GitHub repository releases. - releases = [] - if not is_owasp_site_repository: - existing_release_node_ids = set( - Release.objects.filter(repository=repository).values_list("node_id", flat=True) - if repository.id - else () - ) - for gh_release in gh_repository.get_releases(): - release_node_id = Release.get_node_id(gh_release) - if release_node_id in existing_release_node_ids: - break - - # GitHub release author. - if gh_release.author is not None: - author = User.update_data(gh_release.author) - - # GitHub release. 
- releases.append(Release.update_data(gh_release, author=author, repository=repository)) - - return organization, repository, releases diff --git a/backend/apps/github/models/issue.py b/backend/apps/github/models/issue.py index bc0e41c34..97dd2b6de 100644 --- a/backend/apps/github/models/issue.py +++ b/backend/apps/github/models/issue.py @@ -1,5 +1,7 @@ """Github app issue model.""" +from functools import lru_cache + from django.db import models from apps.common.models import BulkSaveModel, TimestampedModel @@ -25,17 +27,25 @@ class State(models.TextChoices): title = models.CharField(verbose_name="Title", max_length=500) body = models.TextField(verbose_name="Body", default="") - summary = models.TextField(verbose_name="Summary", max_length=3000, default="") + + summary = models.TextField( + verbose_name="Summary", default="", blank=True + ) # AI generated summary + hint = models.TextField(verbose_name="Hint", default="", blank=True) # AI generated hint state = models.CharField( verbose_name="State", max_length=20, choices=State, default=State.OPEN ) - state_reason = models.CharField(verbose_name="State reason", max_length=200, default="") + state_reason = models.CharField( + verbose_name="State reason", max_length=200, default="", blank=True + ) url = models.URLField(verbose_name="URL", max_length=500, default="") number = models.PositiveBigIntegerField(verbose_name="Number", default=0) sequence_id = models.PositiveBigIntegerField(verbose_name="Issue ID", default=0) is_locked = models.BooleanField(verbose_name="Is locked", default=False) - lock_reason = models.CharField(verbose_name="Lock reason", max_length=200, default="") + lock_reason = models.CharField( + verbose_name="Lock reason", max_length=200, default="", blank=True + ) comments_count = models.PositiveIntegerField(verbose_name="Comments", default=0) @@ -130,6 +140,12 @@ def bulk_save(issues, fields=None): """Bulk save issues.""" BulkSaveModel.bulk_save(Issue, issues, fields=fields) + @staticmethod + @lru_cache(maxsize=128) + def open_issues_count(): + """Return open issues count.""" + return Issue.open_issues.count() + @staticmethod def update_data(gh_issue, author=None, repository=None, save=True): """Update issue data.""" diff --git a/backend/apps/owasp/admin.py b/backend/apps/owasp/admin.py index f06939b5e..b4e00c72f 100644 --- a/backend/apps/owasp/admin.py +++ b/backend/apps/owasp/admin.py @@ -3,7 +3,10 @@ from django.contrib import admin from django.utils.safestring import mark_safe -from apps.owasp.models import Chapter, Committee, Event, Project +from apps.owasp.models.chapter import Chapter +from apps.owasp.models.committee import Committee +from apps.owasp.models.event import Event +from apps.owasp.models.project import Project class ChapterAdmin(admin.ModelAdmin): diff --git a/backend/apps/owasp/api/__init__.py b/backend/apps/owasp/api/__init__.py index cb9683024..37e80bedf 100644 --- a/backend/apps/owasp/api/__init__.py +++ b/backend/apps/owasp/api/__init__.py @@ -1,6 +1 @@ """OWASP app API.""" - -from apps.owasp.api.chapter import ChapterViewSet -from apps.owasp.api.committee import CommitteeViewSet -from apps.owasp.api.event import EventViewSet -from apps.owasp.api.project import ProjectViewSet diff --git a/backend/apps/owasp/api/chapter.py b/backend/apps/owasp/api/chapter.py index 1a3b77f8c..bd91b5e7c 100644 --- a/backend/apps/owasp/api/chapter.py +++ b/backend/apps/owasp/api/chapter.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.owasp.models import Chapter +from 
apps.owasp.models.chapter import Chapter # Serializers define the API representation. diff --git a/backend/apps/owasp/api/committee.py b/backend/apps/owasp/api/committee.py index a18f38520..7e30c720f 100644 --- a/backend/apps/owasp/api/committee.py +++ b/backend/apps/owasp/api/committee.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.owasp.models import Committee +from apps.owasp.models.committee import Committee # Serializers define the API representation. diff --git a/backend/apps/owasp/api/event.py b/backend/apps/owasp/api/event.py index 8fd74fdf3..2420d1d90 100644 --- a/backend/apps/owasp/api/event.py +++ b/backend/apps/owasp/api/event.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.owasp.models import Event +from apps.owasp.models.event import Event # Serializers define the API representation. diff --git a/backend/apps/owasp/api/project.py b/backend/apps/owasp/api/project.py index e67eedabe..74d869a7b 100644 --- a/backend/apps/owasp/api/project.py +++ b/backend/apps/owasp/api/project.py @@ -2,7 +2,7 @@ from rest_framework import serializers, viewsets -from apps.owasp.models import Project +from apps.owasp.models.project import Project # Serializers define the API representation. diff --git a/backend/apps/owasp/api/search/__init__.py b/backend/apps/owasp/api/search/__init__.py index 3ac965a6c..5ec91620c 100644 --- a/backend/apps/owasp/api/search/__init__.py +++ b/backend/apps/owasp/api/search/__init__.py @@ -1,4 +1 @@ """OWASP app search API.""" - -from apps.owasp.api.search.issue import project_issues -from apps.owasp.api.search.project import projects diff --git a/backend/apps/owasp/api/search/issue.py b/backend/apps/owasp/api/search/issue.py index 61dbd539c..983137e77 100644 --- a/backend/apps/owasp/api/search/issue.py +++ b/backend/apps/owasp/api/search/issue.py @@ -3,23 +3,29 @@ from algoliasearch_django import raw_search from django.http import JsonResponse -from apps.github.models import Issue +from apps.github.models.issue import Issue -def project_issues(request): - """Search project issues view.""" - issues_params = { +def get_issues(query, distinct=False, limit=25): + """Return issues relevant to a search query.""" + params = { "attributesToRetrieve": [ "idx_created_at", "idx_project_name", "idx_repository_languages", + "idx_summary", "idx_title", "idx_url", ], - "hitsPerPage": 25, + "hitsPerPage": limit, } - return JsonResponse( - raw_search(Issue, request.GET.get("q", ""), issues_params)["hits"], - safe=False, - ) + if distinct: + params["distinct"] = 1 + + return raw_search(Issue, query, params)["hits"] + + +def project_issues(request): + """Search project issues view.""" + return JsonResponse(get_issues(request.GET.get("q", "")), safe=False) diff --git a/backend/apps/owasp/api/search/project.py b/backend/apps/owasp/api/search/project.py index ce7efc678..6ebcfb962 100644 --- a/backend/apps/owasp/api/search/project.py +++ b/backend/apps/owasp/api/search/project.py @@ -3,7 +3,7 @@ from algoliasearch_django import raw_search from django.http import JsonResponse -from apps.owasp.models import Project +from apps.owasp.models.project import Project def projects(request): diff --git a/backend/apps/owasp/api/urls.py b/backend/apps/owasp/api/urls.py index 957866a17..31fd1ff4c 100644 --- a/backend/apps/owasp/api/urls.py +++ b/backend/apps/owasp/api/urls.py @@ -2,12 +2,10 @@ from rest_framework import routers -from apps.owasp.api import ( - ChapterViewSet, - CommitteeViewSet, - EventViewSet, - ProjectViewSet, -) +from 
apps.owasp.api.chapter import ChapterViewSet +from apps.owasp.api.committee import CommitteeViewSet +from apps.owasp.api.event import EventViewSet +from apps.owasp.api.project import ProjectViewSet router = routers.SimpleRouter() diff --git a/backend/apps/owasp/management/__init__.py b/backend/apps/owasp/management/__init__.py index 2f58a42da..72e9c8e98 100644 --- a/backend/apps/owasp/management/__init__.py +++ b/backend/apps/owasp/management/__init__.py @@ -1 +1 @@ -"""Common management module.""" +"""OWASP management module.""" diff --git a/backend/apps/owasp/management/commands/owasp_aggregate_projects_data.py b/backend/apps/owasp/management/commands/owasp_aggregate_projects_data.py index ad85aa7f9..8ed439914 100644 --- a/backend/apps/owasp/management/commands/owasp_aggregate_projects_data.py +++ b/backend/apps/owasp/management/commands/owasp_aggregate_projects_data.py @@ -2,7 +2,7 @@ from django.core.management.base import BaseCommand -from apps.owasp.models import Project +from apps.owasp.models.project import Project class Command(BaseCommand): diff --git a/backend/apps/owasp/management/commands/owasp_scrape_site_data.py b/backend/apps/owasp/management/commands/owasp_scrape_site_data.py index baf600c32..a4ef589cf 100644 --- a/backend/apps/owasp/management/commands/owasp_scrape_site_data.py +++ b/backend/apps/owasp/management/commands/owasp_scrape_site_data.py @@ -12,7 +12,7 @@ from apps.github.constants import GITHUB_ITEMS_PER_PAGE, GITHUB_USER_RE from apps.github.utils import normalize_url -from apps.owasp.models import Project +from apps.owasp.models.project import Project logger = logging.getLogger(__name__) diff --git a/backend/apps/owasp/models/__init__.py b/backend/apps/owasp/models/__init__.py index b743028f2..e69de29bb 100644 --- a/backend/apps/owasp/models/__init__.py +++ b/backend/apps/owasp/models/__init__.py @@ -1,4 +0,0 @@ -from apps.owasp.models.chapter import Chapter -from apps.owasp.models.committee import Committee -from apps.owasp.models.event import Event -from apps.owasp.models.project import Project diff --git a/backend/apps/owasp/models/managers/__init__.py b/backend/apps/owasp/models/managers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/backend/apps/owasp/models/managers/project.py b/backend/apps/owasp/models/managers/project.py new file mode 100644 index 000000000..ccd50628c --- /dev/null +++ b/backend/apps/owasp/models/managers/project.py @@ -0,0 +1,11 @@ +"""OWASP app project managers.""" + +from django.db import models + + +class ActiveProjectManager(models.Manager): + """Active projects.""" + + def get_queryset(self): + """Get queryset.""" + return super().get_queryset().filter(is_active=True) diff --git a/backend/apps/owasp/models/project.py b/backend/apps/owasp/models/project.py index 926ccee24..31b848e8b 100644 --- a/backend/apps/owasp/models/project.py +++ b/backend/apps/owasp/models/project.py @@ -1,15 +1,21 @@ """OWASP app project models.""" +from functools import lru_cache + from django.db import models from apps.common.models import BulkSaveModel, TimestampedModel from apps.owasp.models.common import OwaspEntity +from apps.owasp.models.managers.project import ActiveProjectManager from apps.owasp.models.mixins import ProjectIndexMixin class Project(BulkSaveModel, OwaspEntity, ProjectIndexMixin, TimestampedModel): """Project model.""" + objects = models.Manager() + active_projects = ActiveProjectManager() + class Meta: db_table = "owasp_projects" verbose_name_plural = "Projects" @@ -183,6 +189,12 @@ def from_github(self, 
gh_repository, repository): # FKs. self.owasp_repository = repository + @staticmethod + @lru_cache(maxsize=128) + def active_projects_count(): + """Return active projects count.""" + return Project.active_projects.count() + @staticmethod def bulk_save(projects): """Bulk save projects.""" diff --git a/backend/apps/owasp/templates/search/issue.html b/backend/apps/owasp/templates/search/issue.html index 7520c82f4..0da8e5888 100644 --- a/backend/apps/owasp/templates/search/issue.html +++ b/backend/apps/owasp/templates/search/issue.html @@ -18,7 +18,7 @@

Find an issue to work on

name="q" placeholder="Type To Search..." type="search" - hx-get="{% url 'search-project-issues' %}" + hx-get="{% url 'api-search-project-issues' %}" hx-indicator=".htmx-indicator" hx-swap="none" hx-target="#search-results" diff --git a/backend/apps/owasp/templates/search/project.html b/backend/apps/owasp/templates/search/project.html index b774c6f98..13e241bf3 100644 --- a/backend/apps/owasp/templates/search/project.html +++ b/backend/apps/owasp/templates/search/project.html @@ -18,7 +18,7 @@

Find a project

name="q" placeholder="Type To Search..." type="search" - hx-get="{% url 'search-projects' %}" + hx-get="{% url 'api-search-projects' %}" hx-indicator=".htmx-indicator" hx-swap="none" hx-target="#search-results" diff --git a/backend/apps/slack/__init__.py b/backend/apps/slack/__init__.py new file mode 100644 index 000000000..e5d45429a --- /dev/null +++ b/backend/apps/slack/__init__.py @@ -0,0 +1 @@ +from apps.slack.commands import * # noqa: F403 diff --git a/backend/apps/slack/apps.py b/backend/apps/slack/apps.py new file mode 100644 index 000000000..636a92ea0 --- /dev/null +++ b/backend/apps/slack/apps.py @@ -0,0 +1,10 @@ +from django.apps import AppConfig +from django.conf import settings +from slack_bolt import App + + +class SlackConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "apps.slack" + + app = App(token=settings.SLACK_BOT_TOKEN) if settings.SLACK_BOT_TOKEN != "None" else None # noqa: S105 diff --git a/backend/apps/slack/blocks.py b/backend/apps/slack/blocks.py new file mode 100644 index 000000000..9be99061b --- /dev/null +++ b/backend/apps/slack/blocks.py @@ -0,0 +1,14 @@ +"""Slack blocks.""" + + +def divider(): + """Return divider block.""" + return {"type": "divider"} + + +def markdown(text): + """Return markdown block.""" + return { + "type": "section", + "text": {"type": "mrkdwn", "text": text}, + } diff --git a/backend/apps/slack/commands/__init__.py b/backend/apps/slack/commands/__init__.py new file mode 100644 index 000000000..662515c11 --- /dev/null +++ b/backend/apps/slack/commands/__init__.py @@ -0,0 +1 @@ +from apps.slack.commands.contribute import contribute diff --git a/backend/apps/slack/commands/contribute.py b/backend/apps/slack/commands/contribute.py new file mode 100644 index 000000000..edae78537 --- /dev/null +++ b/backend/apps/slack/commands/contribute.py @@ -0,0 +1,63 @@ +"""Slack bot contribute command.""" + +from apps.common.utils import get_absolute_url +from apps.slack.apps import SlackConfig +from apps.slack.blocks import divider, markdown +from apps.slack.constants import OWASP_PROJECT_NEST_CHANNEL_ID + + +def contribute(ack, say, command): + """Slack /contribute command handler.""" + from apps.github.models.issue import Issue + from apps.owasp.api.search.issue import get_issues + from apps.owasp.models.project import Project + + ack() + + # TODO(arkid15r): consider adding escaping for the user's input. + search_query = command["text"] + blocks = [ + divider(), + markdown(f'*No results found for "{search_query}"*\n'), + ] + issues = get_issues(search_query, distinct=True, limit=10) + + if issues: + blocks = [ + divider(), + markdown( + ( + f"\n*Here are top 10 most relevant issues (1 issue per project) " + f"that I found for*\n `/contribute {search_query}`:\n" + ) + if search_query + else ( + "\n*Here are top 10 most recent issues (1 issue per project):*\n" + "You can refine the results by using a more specific query, e.g.\n" + "`/contribute python good first issue`" + ) + ), + ] + for idx, issue in enumerate(issues): + blocks.append( + markdown( + f"\n{idx + 1}. {issue['idx_project_name']}\n" + f"<{issue['idx_url']}|{issue['idx_title']}>\n" + ), + ) + + blocks.append( + markdown( + f"⚠️ *Extended search over {Issue.open_issues_count()} open issues in " + f"{Project.active_projects_count()} OWASP projects is available at " + f"<{get_absolute_url('project-issues')}>*\n" + "You can share feedback on your search experience " + f"in the <#{OWASP_PROJECT_NEST_CHANNEL_ID}|project-nest> channel." 
+ ), + ) + + say(blocks=blocks) + + +if SlackConfig.app: + contribute = SlackConfig.app.command("/contribute")(contribute) diff --git a/backend/apps/slack/constants.py b/backend/apps/slack/constants.py new file mode 100644 index 000000000..a6b50d370 --- /dev/null +++ b/backend/apps/slack/constants.py @@ -0,0 +1,3 @@ +"""Slack app constants.""" + +OWASP_PROJECT_NEST_CHANNEL_ID = "C07JLLG2GFQ" diff --git a/backend/apps/slack/management/__init__.py b/backend/apps/slack/management/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/backend/apps/slack/management/commands/__init__.py b/backend/apps/slack/management/commands/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/backend/apps/slack/management/commands/run_slack_bot.py b/backend/apps/slack/management/commands/run_slack_bot.py new file mode 100644 index 000000000..cd9aeb74d --- /dev/null +++ b/backend/apps/slack/management/commands/run_slack_bot.py @@ -0,0 +1,19 @@ +"""A command to start Slack bot.""" + +import logging + +from django.conf import settings +from django.core.management.base import BaseCommand +from slack_bolt.adapter.socket_mode import SocketModeHandler + +from apps.slack.apps import SlackConfig + +logger = logging.getLogger(__name__) + + +class Command(BaseCommand): + help = "Runs Slack bot application." + + def handle(self, *args, **options): + if settings.SLACK_APP_TOKEN != "None": # noqa: S105 + SocketModeHandler(SlackConfig.app, settings.SLACK_APP_TOKEN).start() diff --git a/backend/data/nest.json b/backend/data/nest.json index 67da3728f..d3c174573 100644 --- a/backend/data/nest.json +++ b/backend/data/nest.json @@ -4,10 +4,11 @@ "pk": 1, "fields": { "nest_created_at": "2024-09-11T19:06:59.693Z", - "nest_updated_at": "2024-09-11T19:06:59.693Z", + "nest_updated_at": "2024-09-13T14:54:00.231Z", "node_id": "I_kwDOMl6dl86V22oI", "title": "Hardcoded username and endless loop tests didn't work ", "body": "Hi there!\r\nI tried ABAP-Code-Analyzer with the \"python main.py ABAPTeste02ListagemdeOV.txt\" command. ABAPTeste02ListagemdeOV.txt ( attached) file was in the same directory than ABAP-Code-Analyzer.\r\nThe xls output file was generated with no lines (attached).\r\nI had understood ABAP-Code-Analyzer should \"caught\" hardcode user name and endless loop...\r\nThanks!\r\n[abap_security_scan_report.xlsx](https://github.com/user-attachments/files/16932297/abap_security_scan_report.xlsx)\r\n[ABAPTeste02ListagemdeOV.txt](https://github.com/user-attachments/files/16932299/ABAPTeste02ListagemdeOV.txt)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/abap-code-scanner/issues/1", @@ -30,10 +31,11 @@ "pk": 2, "fields": { "nest_created_at": "2024-09-11T19:10:10.587Z", - "nest_updated_at": "2024-09-11T19:10:10.587Z", + "nest_updated_at": "2024-09-13T14:56:28.200Z", "node_id": "I_kwDOLYZa3c6BGmff", "title": "SDRF Working Group Meetings", "body": "OWASP SDRF Weekly Meeting\r\n**First Meeting**: Monday, 4 March · 5:00 – 6:00pm\r\n**Time zone**: Australia/Sydney\r\n**URL**: https://meet.google.com/yvq-phpg-viy\r\n\r\nWill re-occur weekly on Mondays 5PM AEDT. ", + "summary": "A weekly meeting for the OWASP SDRF Working Group is scheduled to take place every Monday at 5:00 PM AEDT, starting on March 4. Participants can join via the provided Google Meet link. 
It may be helpful to set reminders for these recurring meetings.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-secure-development-and-release-framework/issues/1", @@ -56,10 +58,11 @@ "pk": 3, "fields": { "nest_created_at": "2024-09-11T19:10:47.813Z", - "nest_updated_at": "2024-09-11T19:10:47.813Z", + "nest_updated_at": "2024-09-13T14:56:57.808Z", "node_id": "I_kwDOLD0lt86ByuLx", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description. This will help clarify the project's purpose and improve understanding for users and contributors. It is suggested to draft a concise summary that highlights the project's goals, features, and potential use cases.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-cybersecurity-certification-course/issues/1", @@ -82,10 +85,11 @@ "pk": 4, "fields": { "nest_created_at": "2024-09-11T19:10:51.794Z", - "nest_updated_at": "2024-09-11T19:10:51.794Z", + "nest_updated_at": "2024-09-13T14:57:01.156Z", "node_id": "I_kwDOLD0cZ859uhUf", "title": "Solana Top Common Smart Contract Issues - QuillAudits", "body": "**### 1] Integer Overflow** \r\n**Rust offers different integer types with varying ranges:**\r\nu8 (8 bits): 0 to 255\r\nu16 (16 bits): 0 to 65535\r\nu32 (32 bits): 0 to 4294967295\r\nu64 (64 bits): 0 to 18446744073709551615\r\nu128 (128 bits): 0 to a very large positive number\r\n\r\nInteger overflow occurs when an arithmetic operation results in a value that exceeds the maximum representable value of the chosen integer data type. \r\nFor example, a = a + 1 — if an u8 integer a is changed to a value outside of its range, say 256 or -1, then an overflow/underflow will occur.\r\nThis can lead to unexpected behavior in smart contracts, as the value wraps around to the minimum value instead of throwing an error.\r\n\r\n**Example:-**\r\n```\r\n\r\nub fn pretty_time(t: u64) -> String { \r\nlet seconds = t % 60;\r\n let minutes = (t / 60) % 60;\r\n let hours = (t / (60 * 60)) % 24;\r\n let days = t / (60 * 60 * 24);\r\n\r\n\r\npub fn calculate_fee_from_amount(amount: u64, percentage: f32) -> u64 {\r\n if percentage <= 0.0 {\r\n return 0\r\n }\r\n let precision_factor: f32 = 1000000.0;\r\n let factor = (percentage / 100.0 * precision_factor) as u128; //largest it can get is 10^4\r\n (amount as u128 * factor / precision_factor as u128) as u64 // this does not fit if amount\r\n // itself cannot fit into u64\r\n\r\n```\r\n\r\n`Description`:\r\nIn the above functions mentioned, we are doing some calculations using the `*` and `/` operators, which can cause an overflow/underflow. If one uses the debug, build, it can cause a panic; however, in the release build, it will underflow silently.\r\n\r\n`Remediation`\r\n1. Instead of using the `*` operator, we recommend using the `checked_mul` function to prevent an overflow.\r\n2. Instead of using the `/` operator, we recommend using the `checked_div` function to prevent an underflow.\r\n\r\n\r\n### 2]Missing Account Verification \r\nThe \"Missing Account Verification\" issue in Rust smart contracts on the Solana blockchain typically arises when a smart contract fails to verify or check the ownership and structure of an account, potentially leading to vulnerabilities. This can occur due to various reasons, such as incorrect constructor arguments, failed contract verification, or lack of signature validation. 
This can lead to several problems, including Unauthorized Access, Denial-of-Service Attacks, and Loss of Funds.\r\n\r\n**Example:-**\r\n ```\r\n if !acc.sender.is_signer || !acc.metadata.is_signer {\r\n return Err(ProgramError::MissingRequiredSignature)\r\n }\r\n\r\ncreate.rs - Line 121:\r\n if a.rent.key != &sysvar::rent::id() ||\r\n a.token_program.key != &spl_token::id() ||\r\n a.associated_token_program.key != &spl_associated_token_account::id() ||\r\n a.system_program.key != &system_program::id()\r\n {\r\n return Err(ProgramError::InvalidAccountData)\r\n }\r\n\r\n```\r\n```\r\ncreate.rs - Line 85:\r\n if !a.sender.is_writable || //fee payer\r\n !a.sender_tokens.is_writable || //debtor\r\n !a.recipient_tokens.is_writable || //might be created\r\n !a.metadata.is_writable || //will be created\r\n !a.escrow_tokens.is_writable || //creditor\r\n !a.streamflow_treasury_tokens.is_writable || //might be created\r\n !a.partner_tokens.is_writable\r\n //might be created\r\n // || !a.liquidator.is_writable //creditor (tx fees)\r\n {\r\n return Err(SfError::AccountsNotWritable.into())\r\n }\r\n```\r\n\r\n```\r\ncreate.rs - Line 80:\r\n if !a.escrow_tokens.data_is_empty() || !a.metadata.data_is_empty() {\r\n return Err(ProgramError::AccountAlreadyInitialized)\r\n }\r\n\r\n```\r\n```\r\ncreate.rs - Line 99:\r\n let strm_treasury_pubkey = Pubkey::from_str(STRM_TREASURY).unwrap();\r\n let withdrawor_pubkey = Pubkey::from_str(WITHDRAWOR_ADDRESS).unwrap();\r\n let strm_treasury_tokens = get_associated_token_address(&strm_treasury_pubkey, a.mint.key);\r\n let sender_tokens = get_associated_token_address(a.sender.key, a.mint.key);\r\n let recipient_tokens = get_associated_token_address(a.recipient.key, a.mint.key);\r\n let partner_tokens = get_associated_token_address(a.partner.key, a.mint.key);\r\n\r\n\r\n if a.streamflow_treasury.key != &strm_treasury_pubkey ||\r\n a.streamflow_treasury_tokens.key != &strm_treasury_tokens ||\r\n a.withdrawor.key != &withdrawor_pubkey\r\n {\r\n return Err(SfError::InvalidTreasury.into())\r\n }\r\n```\r\n\r\n`Description`: Account verification is critical in Solana programs. Signed and writable accounts, as recommended, are to be verified before the business logic implementation, Also, the accounts data is verified to match the business logic requirements.\r\n\r\n\r\n### 3]Missing Signer check\r\nIf the account provided as a signer is not included as a signer in the transaction, Solana will throw a MissingRequiredSignature error. This helps prevent the unauthorized execution of instructions.\r\n\r\n**Example:-**\r\n\r\n```\r\n let init_vesting_account = create_account(\r\n &payer.key,\r\n &vesting_account_key,\r\n rent.minimum_balance(state_size),\r\n state_size as u64,\r\n &program_id,\r\n );\r\n\r\n```\r\n`Description`: In the above example, An instruction should only be available to a restricted set of entities; the program should verify that the call has been signed by the appropriate entity.\r\n\r\n`Remediation`: Verify that the payer account is a signer using the is_signer() function.\r\n\r\n\r\n### 4]Arithmetic Accuracy Deviation \r\nArithmetic accuracy deviations in Rust smart contracts on Solana can occur due to various factors, leading to unintended behaviour and potential security vulnerabilities. 
These deviations refer to situations where the outcome of mathematical operations within a smart contract differs from the expected result.\r\n\r\n**Example:-**\r\n \r\n```\r\nlet x = args.num_nfts_in_bucket;\r\n let y = args.num_nfts_in_lockers;\r\n let x_plus_y = (y as u64).checked_add(x as u64).unwrap();\r\n\r\n let numerator = args\r\n .locker_duration\r\n .checked_mul(y as u64)\r\n .unwrap()\r\n .checked_mul(100)\r\n .unwrap()\r\n .checked_mul(LAMPORTS_PER_DROPLET)\r\n .unwrap();\r\n\r\nlet denominator = args.max_locker_duration.checked_mul(x_plus_y).unwrap();\r\n let raw_interest = numerator.checked_div(denominator).unwrap();\r\n\r\n```\r\n`Description`: In the above example,Consider a scenario in which x = 1000, y = 1, and max_duration = t ∗ 116 sec.\r\n• By those above conditions, a user can skip t seconds of duration for interest calculation. \r\n• Letlocker_duration=6, andtis6sec.\r\n• Interest=(6∗1∗100∗108)/(6∗116∗8640∗1001)=(int)0.997765=0\r\n\r\n`Remediation`: It is recommended to perform a ceiling division when calculating interest owed.\r\n\r\n\r\n\r\n### 5]Arbitrary signed program invocation \r\nThe Arbitrary Signed Program Invocation issue in Solana refers to a vulnerability in the token instruction code that allows invoking an arbitrary program instead of the real SPL-token program. This issue can lead to security risks and potential attacks on the Solana blockchain.\r\n\r\nIn Solana, a transaction consists of one or more instructions, an array of accounts to read and write data from, and one or more signatures. Instructions are the smallest execution logic on Solana and invoke programs that make calls to the Solana runtime to update the state. Programs on Solana don't store data/state; rather, data/state is stored in accounts. When a Solana program invokes another program, the callee's program ID is supplied to the call typically through one of the following two functions: invoke or invoke_signed. The program ID is the first parameter of the instruction. In the case of the token instruction, there was a vulnerability that allowed invoking an arbitrary program, which could lead to security risks and potential attacks.\r\n\r\n**Example:-**\r\n```\r\n#[program]\r\npub mod Issue_ASPI {\r\n use super::*;\r\n pub fn transfer_tokens(ctx: Context, amount: u64) -> ProgramResult {\r\n let token_program = ctx.accounts.token_program.clone();\r\n\r\n // Vulnerable: Allowing arbitrary program invocation\r\n invoke(&token_program, &ctx.accounts.token_account, &[amount.to_le_bytes().as_ref()]);\r\n Ok(())\r\n }\r\n}\r\n```\r\n`Description`: In The above example, The code invokes the token_program without validating its program ID. An attacker could supply a malicious program instead, leading to unintended execution.\r\nExploitation:\r\nThe attacker supplies a malicious program ID as token_program.\r\nYour program invokes the malicious program, potentially granting it unintended control over tokens or other assets.\r\n\r\n`Remediation`: To avoid such vulnerabilities and attacks in general, it is essential to ensure that the program ID is correctly checked and validated before executing any instructions\r\n\r\n### 6] Solana account confusions \r\n​​In your Solana smart contracts, remember that users can provide any type of account as input. Don't assume ownership alone guarantees the account matches your expectations. Always verify the account's data type to avoid security vulnerabilities. Solana programs often involve multiple account types for data storage. 
Checking account ownership isn't enough. Validate every provided account's data type against your intended use.\r\n\r\n**Example:-**\r\n```\r\nprocessor.rs - Line 44\r\n let vesting_account_key = Pubkey::create_program_address(&[&seeds], &program_id).unwrap();\r\n if vesting_account_key != *vesting_account.key {\r\n msg!(\"Provided vesting account is invalid\");\r\n return Err(ProgramError::InvalidArgument);\r\n }\r\n```\r\n\r\n`Description`: In the above example,The address generated using create_program_address is not guaranteed to be a valid program address off the curve. Program addresses do not lie on the ed25519 curve and, therefore, have no valid private key associated with them, and thus, generating a signature for it is impossible. There is about a 50/50 chance of this happening for a given collection of seeds and program ID.\r\n\r\n`Remediation`: To generate a valid program address using a specific seed, use find_program_address function, which iterates through multiple bump seeds until a valid combination that does not lie on the curve is found.\r\n\r\n### 7]Error not handled \r\nUnhandled errors in Rust Solana programs pose a significant threat to their security and functionality. These errors can lead to unexpected behavior, program crashes, and even potential financial losses. When an error occurs, the program can either handle it gracefully or leave it unhandled. Unhandled errors are those that the program doesn't explicitly handle and recover from.\r\n\r\n**Example:-**\r\n\r\n```\r\npub mod my_program {\r\n use super::*;\r\n\r\n pub fn transfer_tokens(ctx: Context, amount: u64) -> ProgramResult {\r\n // Validate inputs\r\n if amount == 0 {\r\n return Err(ProgramError::InvalidInput); // Explicit error for invalid amount\r\n }\r\n // ... (rest of the transfer logic)\r\n // Handle potential errors from other operations\r\n let transfer_result = spl_token::transfer(\r\n /* ... transfer parameters ... */\r\n );\r\n match transfer_result {\r\n Ok(()) => {\r\n // Transfer successful\r\n }\r\n Err(error) => {\r\n // Handle transfer error appropriately\r\n return Err(error);\r\n }\r\n }\r\n\r\n Ok(())\r\n }\r\n```\r\n\r\n`Description`: In the Above Example, there's a potential \"error not handled\" issue. After calling spl_token::transfer, which presumably performs a token transfer operation, the code checks the result with a match statement. If the transfer operation fails, an error is caught, but the error is simply returned without any further handling or logging.\r\n\r\n`Remediation`: In order to mitigate the \"error not handled\" issue, it's recommended to include proper error handling mechanisms, such as logging the error, rolling back any state changes, or taking appropriate actions based on the specific error. Simply returning the error without any additional handling may lead to undesired behavior and make it difficult to identify and debug issues during the execution of the program.\r\n\r\n\r\n", + "summary": "The issue outlines several common vulnerabilities found in Solana smart contracts, each accompanied by a description, examples, and remediation suggestions. \n\n1. **Integer Overflow**: Arithmetic operations can lead to overflow/underflow errors. Use `checked_mul` and `checked_div` functions for safe calculations.\n\n2. **Missing Account Verification**: Failing to verify account ownership can lead to unauthorized access and vulnerabilities. Ensure all accounts are properly verified before executing business logic.\n\n3. 
**Missing Signer Check**: An account not included as a signer can trigger an error. Always check that the required accounts are signers using the `is_signer()` function.\n\n4. **Arithmetic Accuracy Deviation**: Mathematical operations might yield unexpected results due to erroneous calculations. It is recommended to perform ceiling division for accurate interest calculations.\n\n5. **Arbitrary Signed Program Invocation**: Invoking a program without validating its ID can lead to attacks. Always validate the program ID before invocation.\n\n6. **Solana Account Confusions**: Verify not just the ownership of accounts but also their data types to prevent security vulnerabilities.\n\n7. **Error Not Handled**: Unhandled errors can cause crashes or unintended behavior. Implement proper error handling to manage and log errors adequately.\n\nTo address these issues, developers should adopt safer coding practices, validate inputs thoroughly, and implement robust error handling.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-solana-programs-top-10/issues/1", @@ -108,10 +112,11 @@ "pk": 5, "fields": { "nest_created_at": "2024-09-11T19:10:55.756Z", - "nest_updated_at": "2024-09-11T19:10:55.756Z", + "nest_updated_at": "2024-09-13T14:57:04.466Z", "node_id": "I_kwDOLD0PbM6ByuKa", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. It suggests that a concise summary outlining the project's purpose, features, and goals would enhance understanding for potential users and contributors. The next step would be to draft and include a clear and informative description in the project's documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-pryingdeep/issues/2", @@ -134,10 +139,11 @@ "pk": 6, "fields": { "nest_created_at": "2024-09-11T19:11:09.490Z", - "nest_updated_at": "2024-09-11T19:11:09.490Z", + "nest_updated_at": "2024-09-13T14:57:15.402Z", "node_id": "I_kwDOLDUp6s6ByuIQ", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. It suggests that a clear and concise description would enhance understanding and engagement with the project. To address this, a suitable project description should be drafted and added.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-de-addiction/issues/1", @@ -160,10 +166,11 @@ "pk": 7, "fields": { "nest_created_at": "2024-09-11T19:11:13.632Z", - "nest_updated_at": "2024-09-11T19:11:13.632Z", + "nest_updated_at": "2024-09-13T14:57:18.709Z", "node_id": "I_kwDOLDUacM6ByuFI", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context for users. 
To address this, a concise and informative description should be crafted and added to the project's documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-appsec-contract-builder/issues/1", @@ -186,10 +193,11 @@ "pk": 8, "fields": { "nest_created_at": "2024-09-11T19:11:17.662Z", - "nest_updated_at": "2024-09-11T19:11:17.662Z", + "nest_updated_at": "2024-09-13T14:57:21.987Z", "node_id": "I_kwDOLDUMl86ByuDS", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. It is suggested to draft a concise overview that outlines the project's purpose, functionality, and any key features.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-naivesystems-analyze/issues/2", @@ -212,10 +220,11 @@ "pk": 9, "fields": { "nest_created_at": "2024-09-11T19:11:21.832Z", - "nest_updated_at": "2024-09-11T19:11:21.832Z", + "nest_updated_at": "2024-09-13T14:57:25.220Z", "node_id": "I_kwDOLDUMEs6ByuCI", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description. This would help provide context and clarify the project's purpose and goals. It is suggested to draft a concise overview that highlights key features and objectives.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-sapkiln/issues/1", @@ -242,6 +251,7 @@ "node_id": "I_kwDOLDUCls6J0ix2", "title": "[Feature] We should spell check the ASVS PDF before it goes out", "body": "Related to this PR: #3 \r\n\r\nDue to technical jargon, this might be tricky in a workflow.", + "summary": "The issue highlights the need for a spell check of the ASVS PDF before its release. It points out that the presence of technical jargon could complicate the spell-checking process. To address this, a careful review and possibly an adjustment of the workflow may be necessary to ensure accuracy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-thick-client-application-security-verification-standard/issues/4", @@ -270,6 +280,7 @@ "node_id": "I_kwDOLDUCls6J1Mmp", "title": "[Feature] Fix documents coming from v0.1", "body": "when we are happy with our v**0**.x, we should rework the github action and folder structure to come from v**1**.x", + "summary": "The issue highlights the need to address documents originating from version 0.1. It suggests that once the team is satisfied with version 0.x, they should revise the GitHub action and reorganize the folder structure to align with version 1.x. A rework is necessary to ensure a smooth transition to the new version.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-thick-client-application-security-verification-standard/issues/5", @@ -294,10 +305,11 @@ "pk": 12, "fields": { "nest_created_at": "2024-09-11T19:11:28.496Z", - "nest_updated_at": "2024-09-11T19:11:28.496Z", + "nest_updated_at": "2024-09-13T14:57:28.619Z", "node_id": "I_kwDOLDUCls6VlrRN", "title": "Should DLL Hijacking be a single item?", "body": "as suggested in #6 by @matreurai \r\n\r\n> Note : I've seen in TASVS-STORAGE a category specific to a specific vulnerability (DLL Hijacking). I think this should be modified as the testing standard is not made for specific vulnerabilities in my opinion. 
Additionally, these items are explaining a type of attack and do not provide any guidance to what to test or how to prevent/mitigate risks of such attacks. The equivalent in the Web Application Security Verification Standard would be to add an item as such:\r\n> \"Cross-Site Scripting Category - Blind cross-site scripting (XSS) is a variant of stored XSS where the malicious payload is executed in a different context or application than where it was originally injected.\".\r\n> I don't think this make sense in the context of a Verification Standard.", + "summary": "The issue discusses whether DLL Hijacking should be treated as a standalone item within the testing standard, as it currently appears in the TASVS-STORAGE category. The suggestion is to modify this approach since the existing items primarily describe the attack type without providing actionable guidance on testing or risk mitigation. The comparison is made to the Web Application Security Verification Standard, where specific attack variants are better integrated. It may be beneficial to revise the structure to enhance clarity and usability in verifying security standards.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-thick-client-application-security-verification-standard/issues/7", @@ -329,6 +341,7 @@ "node_id": "I_kwDOLDUCls6VoUff", "title": "Consider bringing in new items from V5 web ASVS for crypto ", "body": "We could expand our list, for example nonces/IV is one I'd really like to bring in: https://github.com/OWASP/ASVS/blob/master/5.0/en/0x14-V6-Cryptography.md", + "summary": "The issue suggests incorporating new items from the V5 web ASVS related to cryptography into the existing framework. Specifically, the author expresses interest in adding concepts like nonces and initialization vectors (IVs). It would be beneficial to review the linked ASVS document and determine which additional cryptographic items can be integrated into the current standards.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-thick-client-application-security-verification-standard/issues/8", @@ -355,10 +368,11 @@ "pk": 14, "fields": { "nest_created_at": "2024-09-11T19:12:15.554Z", - "nest_updated_at": "2024-09-11T19:12:15.554Z", + "nest_updated_at": "2024-09-13T14:58:05.082Z", "node_id": "I_kwDOKrhCTc6Byt6F", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. It would be beneficial to outline the project's purpose, features, and any relevant details to enhance understanding for potential users and contributors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-antiforensics-project/issues/1", @@ -381,10 +395,11 @@ "pk": 15, "fields": { "nest_created_at": "2024-09-11T19:12:19.748Z", - "nest_updated_at": "2024-09-11T19:12:19.748Z", + "nest_updated_at": "2024-09-13T14:58:08.330Z", "node_id": "I_kwDOKrgTWc6Byt17", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description. Providing a clear and concise description will help users understand the purpose and functionality of the project. 
It is recommended to draft a brief overview that outlines the main features and goals of the project.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-in-xr/issues/1", @@ -411,6 +426,7 @@ "node_id": "I_kwDOKYMc786H6did", "title": "False negative related to SQL Injection", "body": "The endpoint: `https://brokencrystals.com/api/testimonials/count?query=%27` is vulnerable to an SQL injection\r\n\r\nThe endpoint does NOT return `50X` error when the SQL injection occurs, thus: `STATUS_CODE_FILTER` doesn't catch it\r\n\r\nI believe it would be a smart idea to look for common SQL errors such as:\r\n`' - unterminated quoted string at or near \"'\"`\r\n\r\nOther errors are listed here:\r\nhttps://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/07-Input_Validation_Testing/05-Testing_for_SQL_Injection\r\n\r\nI can't easily find one 'list' that has all the SQL errors", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OFFAT/issues/100", @@ -437,6 +453,7 @@ "node_id": "I_kwDOKYMc786IlNBg", "title": "Add automated tests", "body": "Currently each and every PR raised need to be tested manually. Add automated tests using pytest/unittest library which can test PR before merging using Github actions.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OFFAT/issues/106", @@ -463,6 +480,7 @@ "node_id": "I_kwDOKYMc786MIhVC", "title": "Support for HTTP/2", "body": "If I understand correctly, OFFAT does not currently work for HTTP/2? I tried to fuzz some API that uses HTTP/2, but OFFAT produces \r\n\r\n`RemoteDisconnected('Remote end closed connection without response')`\r\n\r\nIs support for this planned in the future?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OFFAT/issues/116", @@ -487,10 +505,11 @@ "pk": 19, "fields": { "nest_created_at": "2024-09-11T19:13:31.551Z", - "nest_updated_at": "2024-09-11T19:13:31.551Z", + "nest_updated_at": "2024-09-13T14:59:01.356Z", "node_id": "I_kwDOKYMc786S-W9h", "title": "Option to bypass host availability check", "body": "If the server now returns a 502 on the root request, the testing will stop. Some servers only reply to valid paths for request. It would be nice if we can have a flag to bypass this \"check whether host is available\". Further, it appears that this host check request is not passed through the provided http proxy using -p http://127.0.0.1:8080.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OFFAT/issues/131", @@ -517,10 +536,11 @@ "pk": 20, "fields": { "nest_created_at": "2024-09-11T19:14:15.683Z", - "nest_updated_at": "2024-09-11T19:14:15.683Z", + "nest_updated_at": "2024-09-13T14:59:15.920Z", "node_id": "I_kwDOKSOWo86Byt0T", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. 
It would be helpful to create a concise summary that outlines the project's purpose, features, and any relevant details for potential users or contributors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ai-top-ten/issues/1", @@ -543,10 +563,11 @@ "pk": 21, "fields": { "nest_created_at": "2024-09-11T19:14:29.608Z", - "nest_updated_at": "2024-09-11T19:14:29.608Z", + "nest_updated_at": "2024-09-13T14:59:26.954Z", "node_id": "I_kwDOKLCek86Bytxf", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context about its purpose and functionality. To address this, a concise and informative description should be drafted and added to the project documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-memory-safety/issues/1", @@ -573,6 +594,7 @@ "node_id": "I_kwDOKLCYV85zp7QN", "title": "Bootloader Test Cases", "body": "A bootloader section would provide test case coverages at lower lever components that create chains of trust that secure devices at boot and their identity", + "summary": "The issue discusses the need for a dedicated section that outlines test cases for bootloaders. These test cases should focus on lower-level components that establish chains of trust, ensuring device security during boot and maintaining their identity. To address this issue, it would be beneficial to develop and document comprehensive test cases that cover these aspects.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-istg/issues/2", @@ -601,6 +623,7 @@ "node_id": "I_kwDOKLCYV85005W_", "title": "ATT&CK ICS, D3FEND & ISTG", "body": "Folks have asked about relation to adjacent frameworks like ATT&CK ICS, D3FEND (links below) and overlap of them with ISTG. \r\n\r\nhttps://attack.mitre.org/techniques/ics/\r\nhttps://attack.mitre.org/matrices/ics/ \r\nhttps://d3fend.mitre.org/\r\n\r\nCurious if the project has a perspective to share and if there are opportunities to partner or collaborate on mappings in the future?", + "summary": "The issue discusses the relationship between the ATT&CK ICS, D3FEND frameworks, and their overlap with ISTG. There is a request for insights on how these frameworks interrelate and an interest in exploring potential collaboration or mapping opportunities in the future. It may be beneficial to outline any existing connections or propose a plan for collaboration on mappings.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-istg/issues/3", @@ -629,6 +652,7 @@ "node_id": "I_kwDOKLCYV853SzuL", "title": "Create ISTG cover art and align on look/feel with MSTG where possible", "body": "MSTG is a mature flagship OWASP project with a large following and a steady flow of contributors maintaining the guide. Until ISTG has a similar maturity and following, it'll be challenging to keep up with MSTG. Although, we should aim to align where it make sense and build relationships with testing guide project leaders for support. 
\r\n\r\nObservations and opportunities to align\r\n- ISTG should have a similar cover to [MSTG's]( https://github.com/OWASP/owasp-mastg/blob/master/Document/Images/mstg_cover.png) \r\n- GitHub pages theme ([material](https://github.com/OWASP/owasp-istg)) should be similar \r\n- Add a download link to the checklist and other formats like PDF \r\n- Input validation category abbreviation detailed in https://github.com/OWASP/owasp-istg/issues/4", + "summary": "The issue discusses the need to create cover art for the ISTG (Mobile Security Testing Guide) that aligns in look and feel with the MSTG (Mobile Security Testing Guide). It highlights that while MSTG is a well-established project, ISTG should strive to align its branding and resources where feasible to foster relationships with MSTG project leaders for support.\n\nKey observations include:\n- Designing ISTG cover art to resemble the MSTG cover.\n- Ensuring the GitHub pages theme for ISTG is consistent with MSTG’s.\n- Adding a download link for the checklist and other formats like PDF.\n- Addressing a specific detail related to input validation category abbreviations.\n\nTo proceed, the team should focus on creating the cover art and aligning the GitHub pages theme accordingly. Additionally, they should implement the suggested download link and follow up on the input validation category issue.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-istg/issues/6", @@ -655,6 +679,7 @@ "node_id": "I_kwDOKLCYV86ASsrT", "title": "Test objective for analyzing build flags of compiled binaries", "body": "Add a test objective that analyzes compiled binaries build flags for PIE/ASLR, NX, etc. that aid in decompilation strategies for reversing.\r\n\r\nReferring to:\r\n\r\n- [ISTG-MEM-INFO-001](https://github.com/OWASP/owasp-istg/tree/main/src/03_test_cases/memory#disclosure-of-source-code-and-binaries-istg-mem-info-001)\r\n- [ISTG-FW-INFO-001](https://github.com/OWASP/owasp-istg/blob/main/src/03_test_cases/firmware/README.md#disclosure-of-source-code-and-binaries-istg-fw-info-001)\r\n", + "summary": "The issue suggests adding a test objective focused on analyzing the build flags of compiled binaries, specifically looking for attributes like Position Independent Execution (PIE), Address Space Layout Randomization (ASLR), and Non-Executable (NX) bits. These elements are crucial for developing decompilation strategies in reverse engineering efforts. To address this, it may be beneficial to reference existing test cases related to the disclosure of source code and binaries from the provided links.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-istg/issues/8", @@ -679,10 +704,11 @@ "pk": 26, "fields": { "nest_created_at": "2024-09-11T19:14:51.096Z", - "nest_updated_at": "2024-09-11T19:14:51.096Z", + "nest_updated_at": "2024-09-13T14:59:40.273Z", "node_id": "I_kwDOKHixBs6Bytup", "title": "Please add a description to this project", "body": "", + "summary": "The project currently lacks a description, which is essential for understanding its purpose and functionality. It is suggested to create a clear and concise overview that outlines the project's objectives, features, and usage instructions. 
This will help potential users and contributors better engage with the project.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-untrust/issues/1", @@ -705,10 +731,11 @@ "pk": 27, "fields": { "nest_created_at": "2024-09-11T19:15:24.498Z", - "nest_updated_at": "2024-09-11T19:15:24.498Z", + "nest_updated_at": "2024-09-13T15:00:06.635Z", "node_id": "I_kwDOJ_RyPc6Bytpb", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. It is important to create a concise and informative summary that outlines the project's purpose, features, and goals.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-api-governance/issues/1", @@ -731,10 +758,11 @@ "pk": 28, "fields": { "nest_created_at": "2024-09-11T19:15:28.512Z", - "nest_updated_at": "2024-09-11T19:15:28.512Z", + "nest_updated_at": "2024-09-13T15:00:10.085Z", "node_id": "I_kwDOJ8-yTs6NkH0A", "title": "Need to know the status", "body": "Hi Team,\r\n\r\nAny update on this project and where are we standing !. Let me know if need any help !\r\n\r\nThanks,\r\nJanibasha", + "summary": "The issue seeks an update on the project's current status. It indicates that assistance may be offered if needed. To move forward, a response detailing the project's progress and any required support would be beneficial.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-llm-prompt-hacking/issues/1", @@ -757,10 +785,11 @@ "pk": 29, "fields": { "nest_created_at": "2024-09-11T19:15:58.522Z", - "nest_updated_at": "2024-09-11T19:15:58.522Z", + "nest_updated_at": "2024-09-13T15:00:33.944Z", "node_id": "I_kwDOJx_i6s6Bytkp", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. It would be beneficial to draft a concise overview that outlines the project's purpose, features, and any relevant details to enhance understanding for users and contributors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-kubefim/issues/1", @@ -783,10 +812,11 @@ "pk": 30, "fields": { "nest_created_at": "2024-09-11T19:16:09.384Z", - "nest_updated_at": "2024-09-11T19:16:09.384Z", + "nest_updated_at": "2024-09-13T15:00:42.697Z", "node_id": "I_kwDOJsDs1M6Bytiv", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. To address this, a concise and informative description should be crafted, outlining the project's purpose, features, and any relevant details that would help users understand its functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-open-security-information-base/issues/2", @@ -813,6 +843,7 @@ "node_id": "I_kwDOJjxT185tlC3F", "title": "Missing app layers", "body": "One of the key ways LLMs/ generative aI is used is through chaining/ agents. Agents put in one of the biggest risks for applications based on LLM/ Generative AI. There are issues with users allowing machine access to agents to perform tasks and agents seem to work autonomously creating security holes.", + "summary": "The issue highlights concerns regarding the security risks associated with using agents in applications based on large language models (LLMs) and generative AI. 
Specifically, it points out that allowing machine access to these agents could lead to autonomous behavior that may create vulnerabilities. To address this, a review of the current implementation of agent access and security measures is necessary to mitigate potential risks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/117", @@ -830,8 +861,8 @@ 16 ], "labels": [ - 7, - 8 + 8, + 7 ] } }, @@ -844,6 +875,7 @@ "node_id": "I_kwDOJjxT185to50E", "title": "Remove LLM05: Supply Chain Vulnerabilities in Favor of LLM03: Training Data Poisoning and LLM07: Insecure Plugin Design", "body": "# **Background:**\r\n- As per published [v1.0](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/tree/main/1_0_vulns) of the OWASP Top 10 for Large Language Model Applications project, I believe we have a duplicate entry within `LLM05: Supply Chain Vulnerabilities`\r\n - The [LLM-05](https://llmtop10.com/llm05/) `LLM05: Supply Chain Vulnerabilities` entry largely overlaps with [LLM-03](https://llmtop10.com/llm03/) `LLM03: Training Data Poisoning`\r\n - The [LLM-05](https://llmtop10.com/llm05/) `LLM05: Supply Chain Vulnerabilities` entry also overlaps with the topic of plugins in [LLM07: Insecure Plugin Design](https://llmtop10.com/llm07/) `LLM07: Insecure Plugin Design`\r\n- This is causing confusion, inconsistency and leaves us not fully utilizing the power of the Top 10 list to include any new vulnerabilities surfaced or if we decide to look at some of the [vulnerables < v1.0](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/tree/main/Archive) which didn't make the cut when we revise [v.1.0.1](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/tree/main/1_0_1_vulns) of the project\r\n\r\n- If we also take the foundation of the OWASP Top 10 lists - [OWASP Top 10 API Security Risks – 2023](https://owasp.org/www-project-top-ten/) as an example, then it is clear (or at least ot a security professional) that even though Supply Chain is not listed as a vulnerability but an organization referencing and basing their Web Application Security stack on the OWASP Top 10, certainly still need to take the whole concept of Supply Chain (via SBOM, however) is still extremely important to that software stack.\r\n- When we threat model an LLM Application, we have to ensure that are some pre-requisites taken into consideration which make them out of scope for us to focus specifically on risks and vulnerabilities present in the area we are assessing. These may be explicitly stated or inherently understood and is not listed as a top risk during the exercise.\r\n\r\n# **Proposed Changes**\r\n- Whilst I agree Supply Chain is an important topic in any security considerations, only the important **true top 10 vulnerabilities** require each subset referencing the supply-chain if it is valid in that specific area - I.E, Training Data Poisoning, Plugins etc. \r\n - I do not believe we need an entry soley dedicated to Supply Chain itself and each entry that has a Supply Chain as an example attack scenario should reference the supply chain topic here", + "summary": "The issue raises concerns about the redundancy of the entry \"LLM05: Supply Chain Vulnerabilities\" within the OWASP Top 10 for Large Language Model Applications. 
It argues that this entry overlaps significantly with \"LLM03: Training Data Poisoning\" and \"LLM07: Insecure Plugin Design,\" leading to confusion and inconsistency. The author suggests that while supply chain vulnerabilities are important, they should be referenced within relevant entries rather than having a separate category. \n\nTo address this, the proposed change is to remove the redundant supply chain entry and ensure that any mention of supply chain vulnerabilities is integrated into the other related categories. This would streamline the list and enhance clarity regarding the top vulnerabilities in LLM applications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/119", @@ -879,6 +911,7 @@ "node_id": "I_kwDOJjxT185wOkr7", "title": "Sensitive Information Disclosure", "body": "In summary, this “vulnerability” is problematic because it mostly doesn’t represent a root cause, but a result or symptom. In the 2021 OWASP Top 10, they reoriented from symptoms to root causes. They actually renamed sensitive information exposure to cryptographic failure to focus on root causes.\r\n\r\nThere is, however, one aspect of this vulnerability that seems to be a true root cause. Regardless of the input it gets, large language model outputs are unpredictable. That unpredictability doesn’t necessarily come from training data poisoning, third-party data sources, or prompt injection. It comes from the stochastic nature and natural language processing capabilities of the large language model as well as ambiguities and inaccuracies inherent in natural language. I would argue that this aspect is a key point mentioned in this section that should be carried forward, however, **I believe it makes more sense in Overreliance**. \r\n", + "summary": "The issue discusses the classification of a vulnerability related to sensitive information disclosure, emphasizing that it is more of a symptom rather than a root cause. The author references the OWASP Top 10's shift in focus from symptoms to root causes, suggesting that the unpredictability of large language model outputs is a significant root cause. This unpredictability stems from the stochastic nature of these models and the inherent ambiguities in natural language, which may not be directly linked to typical concerns like data poisoning or prompt injection. It is proposed that this aspect should be integrated into the discussion on Overreliance instead. \n\nTo address this issue, it may be necessary to reevaluate how vulnerabilities are categorized, particularly in relation to the unpredictability of language models.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/140", @@ -910,6 +943,7 @@ "node_id": "I_kwDOJjxT185w0GmG", "title": "Recommended Updates to Excessive Agency", "body": "Recommend \r\ntrialed instead of trialled\r\n\r\nfavor instead of favour\r\n\r\netc. 
instead of etc\r\n\r\nAdd 'to' to this phrase: Limit the permissions that LLM plugins/tools are granted to other systems the minimum necessary\r\nSo it reads: Limit the permissions that LLM plugins/tools are granted to other systems to the minimum necessary\r\n\r\nConsider reworking the following:\r\nExcessive Agency is the vulnerability that enables damaging actions to be performed in response to unexpected/ambiguous outputs from an LLM (regardless of what is causing the LLM to malfunction; be it hallucination/confabulation, direct/indirect prompt injection, malicious plugin, poorly-engineered benign prompts, or just a poorly-performing model). The root cause of Excessive Agency is typically one or more of: excessive functionality, excessive permissions or excessive autonomy.\r\n\r\nTo be:\r\nExcessive Agency is the vulnerability that enables damaging actions to be performed in response to unexpected/ambiguous outputs from an LLM (regardless of what is causing the LLM to malfunction; be it hallucination/confabulation, direct/indirect prompt injection, malicious plugin, poorly-engineered benign prompts, or just a poorly-performing model). Excessive agency is a vulnerability of excessive functionality, permissions, and/ or autonomy. This differs from Insecure Output Handling which is concerned with insufficient scrutiny of LLM outputs.\r\n \r\n \r\nConsider these adjustments to Common Examples of Vulnerability\r\n1. Excessive Functionality: An LLM agent has access to plugins which include functions that are not needed for the intended operation of the system. For example, a developer needs to grant an LLM agent the ability to read documents from a repository, but the 3rd-party plugin they choose to use also includes the ability to modify and delete documents.\r\n2. Excessive Functionality: A plugin that was trialed during the development phase was dropped in favor of a better alternative, but the original plugin remains available to the LLM agent.\r\n3. Excessive Permissions: An LLM plugin has permissions on other systems that are not needed for the intended operation of the application. E.g., a plugin intended to read data connects to a database server using an identity that not only has SELECT permissions, but also UPDATE, INSERT and DELETE permissions.\r\n4. Excessive Permissions: An LLM plugin that is designed to perform operations on behalf of a user accesses downstream systems with a generic high-privileged identity. E.g., a plugin to read the current user's document store connects to the document repository with a privileged account that has access to all users' files.\r\n5. Excessive Autonomy: An LLM-based application or plugin fails to independently verify and approve high-impact actions. E.g., a plugin that allows a user's documents to be deleted performs deletions without any confirmation from the user.\r\n\r\n\r\n\r\n\r\n", + "summary": "The issue suggests several updates to the documentation regarding Excessive Agency, including language changes and clarifications. \n\nRecommendations include replacing \"trialled\" with \"trialed,\" \"favour\" with \"favor,\" and adding \"to\" in a specified phrase for clarity. 
Additionally, a reworking of the definition of Excessive Agency is proposed to better differentiate it from Insecure Output Handling, emphasizing the role of excessive functionality, permissions, and autonomy as root causes.\n\nFurther adjustments are suggested for the section on Common Examples of Vulnerability, providing clearer examples of excessive functionality, permissions, and autonomy in LLM plugins. \n\nTo address this issue, implement the recommended edits and enhancements to the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/172", @@ -940,6 +974,7 @@ "node_id": "I_kwDOJjxT185w0Ix7", "title": "Consider changes/updates to insecure plugin design", "body": "Addition to the intro:\r\nSince plugins are, under normal circumstances, accessed only by the LLM, exploitation is typically a result of another vulnerability such as excessive agency or direct or indirect prompt injection. However, plugins are still responsible for protecting themselves since side-channel attacks can still occur.\r\n\r\n\r\nPossibly updates to Common Examples:\r\n1. A plugin accepts its input parameters in a single text field instead of distinct input parameters that can be validated and sanitized.\r\n3. A plugin accepts raw SQL or programming statements, which are more difficult to validate than distinct parameters.\r\n5. A plugin adheres to inadequate fine grained authorization controls. \r\n6. A plugin blindly trusts that the LLM output, which is the input to the plugin, correctly represents the expected output for the initial prompt. \r\n7. A plugin treats all LLM content as being created entirely by the user and performs any requested actions without requiring additional authorization.\r\n\r\nPossiblr changes to attack scenarios\r\n2. A plugin used to retrieve embeddings from a vector store accepts configuration parameters as a connection string without any validation. This allows an attacker to experiment and access other vector stores by changing names or host parameters and exfiltrate embeddings they should not have access to. \r\n3. A plugin accepts SQL WHERE clauses as advanced filters, which are then appended to the filtering SQL. This allows an attacker to stage a SQL attack.\r\n4. An attacker uses indirect prompt injection to exploit an insecure code management plugin that has no input validation and weak access control to transfer repository ownership and lock out the user from their repositories.", + "summary": "The issue discusses the need for updates to the design of insecure plugins, emphasizing the importance of self-protection against potential vulnerabilities. 
It suggests enhancing the introduction to highlight that while plugins are mainly accessed by the LLM, they must still safeguard against side-channel attacks.\n\nThe issue proposes revisions to the \"Common Examples\" section, focusing on various insecure practices such as inadequate input validation, reliance on raw SQL statements, insufficient authorization controls, and assumptions regarding the trustworthiness of LLM output.\n\nAdditionally, it outlines potential attack scenarios, emphasizing risks associated with accepting unvalidated configuration parameters, SQL injection vulnerabilities, and the dangers of indirect prompt injection leading to unauthorized actions, like repository ownership transfer.\n\nTo address these concerns, it may be necessary to implement stricter validation and sanitization measures for plugin inputs, enhance authorization controls, and ensure that plugins do not blindly trust LLM outputs.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/174", @@ -970,6 +1005,7 @@ "node_id": "I_kwDOJjxT185xxQ2q", "title": "Change from Vulnerabilities to Risks", "body": "With the realization that the top 10 focuses on the risks that vulnerabilities, I recommend changing our template and the content of the Top 10 to match.\r\n\r\nThe TL;DR \r\n\r\nFo to the [OWASP Top 10](https://owasp.org/www-project-top-ten/) page and the first 2 sentences read:\r\n> The OWASP Top 10 is a standard awareness document for developers and web application security. It represents a broad consensus about the most critical security risks to web applications.\r\n\r\nMy detailed reasoning is in this document: [risks-vs-vulnerabilities.md](https://github.com/Bobsimonoff/LLM-4-Applications-Commentary/blob/main/docs/risks-vs-vulnerabilities.md)", + "summary": "The issue suggests changing the terminology from \"Vulnerabilities\" to \"Risks\" in the context of the OWASP Top 10, as the focus is more on the risks associated with vulnerabilities. The author points to the OWASP Top 10 page, which defines it as a document highlighting critical security risks to web applications. A detailed rationale is provided in an accompanying document. 
\n\nTo address this issue, the template and content of the Top 10 should be updated to reflect this focus on risks rather than vulnerabilities.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/188", @@ -987,8 +1023,8 @@ 20 ], "labels": [ - 7, - 12 + 12, + 7 ] } }, @@ -1001,6 +1037,7 @@ "node_id": "I_kwDOJjxT185yPKW0", "title": "(2) Website Typos + Proposed Fixes", "body": "# Issue 1\r\n**# Where:** https://llmtop10.com/\r\n**# What:** In LLM06's \"feature box\" there is a typo inconsistent with other assets / grammar.\r\n**# Current:** \r\n\r\n>“***LLM’s*** may reveal confidential data...”\r\n\r\n**# Proposed:** \r\n>***LLMs*** (no apostrophe) may reveal confidential data...\r\n\r\n**# Justification:** \r\nGrammar / continuity (to match the PDF, Whitepaper, and Google Doc which all contain \"LLMs\"; the plural of LLM, without an apostrophe, for LLM06.\r\n\r\n# Issue 2 \r\n**# Where:** https://llmtop10.com/intro/\r\n**# What:** In the 1st sentence there is a typo.\r\n**# Current:** \r\n> “The frenzy ***of*** interest ***of*** Large Language Models (LLMs) following ***of*** mass-market pre- trained chatbots in late 2022 has been remarkable.”\r\n\r\n**# Proposed:** \r\n> The frenzy of interest in Large Language Models (LLMs) following the mass marketing of pretrained chatbots in late 2022 has been remarkable.\r\n\r\n**# Justification:** \r\nTypo impacting clarity.\r\n\r\n# Notes:\r\n1. **Potential labels:** \"bug\" + \"website\" **—PS:** It's my understanding I can't assign a \"Label\" because I am not a repo \"Contributor\", or else I'd add the labels; please let me know if that's not the case.\r\n2. Great project / community, hope this helps. :heart:", + "summary": "The issue highlights two typos found on the website https://llmtop10.com/. \n\n**Issue 1:** In the feature box for LLM06, \"LLM’s\" should be corrected to \"LLMs\" to ensure grammatical consistency with other materials. \n\n**Issue 2:** On the introduction page, the first sentence contains multiple occurrences of \"of\" that disrupt clarity. It should be revised to read: \"The frenzy of interest in Large Language Models (LLMs) following the mass marketing of pretrained chatbots in late 2022 has been remarkable.\"\n\nFor resolution, it is suggested to correct the identified typos on the website. Additionally, appropriate labels such as \"bug\" and \"website\" could be added to the issue for better tracking.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/196", @@ -1030,6 +1067,7 @@ "node_id": "I_kwDOJjxT185yc-Fn", "title": "Overreliance: Prevention could include Human in the loop", "body": "Since overreliance occurs with humans or systems overly rely on LLM outputs, I think human in the loop is still valid as a how to prevent measure. ", + "summary": "The issue discusses the concern of overreliance on large language model (LLM) outputs by humans or systems. It suggests that incorporating a human in the loop could be an effective preventive measure. 
To address this, it may be beneficial to explore strategies for integrating human oversight into workflows that utilize LLMs.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/199", @@ -1060,6 +1098,7 @@ "node_id": "I_kwDOJjxT185ydD7j", "title": "template for example attack scenarios change for consistency", "body": "\r\nCondensing the template a bit to demonstrate the issue. I think the Example Attack Scenarios should change .... see below:\r\n\r\n### Common Examples of Risk\r\n\r\n1. Example 1: Specific instance or type of this risk.\r\n\r\n### Prevention and Mitigation Strategies\r\n\r\n1. Prevention Step 1: A step or strategy that can be used to prevent the risk or mitigate its effects.\r\n\r\n### Example Attack Scenarios\r\n\r\nold\r\n> Scenario #1: A detailed scenario illustrating how an attacker could potentially exploit this risk, including the attacker's actions and the potential outcomes.\r\n\r\nnew \r\n> 1. Scenario #1: A detailed scenario illustrating how an attacker could potentially exploit this risk, including the attacker's actions and the potential outcomes.\r\n", + "summary": "The issue discusses the need for consistency in the formatting of the \"Example Attack Scenarios\" section of a template. The suggestion is to modify the existing format by changing the numbering style for scenarios to align with the rest of the template. The proposed change is to replace the old format with a new one that begins the scenarios with a numerical listing. To implement this, the formatting across the template should be reviewed and adjusted accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/200", @@ -1090,6 +1129,7 @@ "node_id": "I_kwDOJjxT1850jeQz", "title": "Merge summary file into the Vulnerabilities files", "body": "Currently the summary of each risk is in a single file that is separate from the actual risk details. This causes a disconnect when the risk is updated. Above description in the template for each risk I would like to see the summary section. Then at production time all the summary sections can be grabbed and put into a single file for PDF generation. ", + "summary": "The issue highlights the need to integrate the summary sections of risk descriptions into the Vulnerabilities files to ensure coherence and ease of updates. The proposed solution is to modify the template for each risk to include the summary section, allowing for the aggregation of all summaries into a single file for PDF generation during production. Implementing this change will improve the accessibility and usability of the risk documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/225", @@ -1120,6 +1160,7 @@ "node_id": "I_kwDOJjxT18509gbo", "title": "Enhance the OWASP LLM Applications Project with a Related Patterns Functionality", "body": "IMO, we should look to provide a glossary or [`CAPEC`](https://capec.mitre.org/) approach to the OWASP LLM Application vulnerabilities - Similar to the way it is done with The [OWASP Web Application](https://owasp.org/Top10/) standards framework, see \"[OWASP Related Patterns](https://owasp.org/www-community/attacks/)\"\r\n\r\nA typical CAPEC entry includes a detailed Execution Flow. 
This consists of 3 sections:\r\n\r\n- **Explore** – Guidance on what to look for that may indicate that the software may be vulnerable to this attack.\r\n- **Experiment** – Guidance on possible ways to test if the software may be vulnerable to this attack.\r\n- **Exploit** – Summary of possible ways to exploit the software if your experiments are successful.\r\nIn many of the CAPEC entries, there will also be an external mapping to one of three possible other data sources:\r\n\r\n[WASC Threat Classification 2.0](http://projects.webappsec.org/w/page/13246978/Threat%20Classification) – A comprehensive framework from The Web Application Security Consortium that categorizes and organizes key security threats to web applications to facilitate standardizing threat reporting and response.\r\n[ATT&CK Related Patterns](https://attack.mitre.org/techniques/enterprise/) – A curated set of adversary behavior descriptors collected by MITRE, providing invaluable insights into the techniques used by threat actors to compromise and maneuver within systems.\r\n[OWASP Related Patterns](https://owasp.org/www-community/attacks/) – A set of techniques that attackers use to exploit the vulnerabilities in applications.\r\n\r\nKudos to **SilverStr** for the awesome [blog post](https://danaepp.com/adversarial-thinking-for-bug-hunters) which triggered my inspiration for us to adopt this", + "summary": "The issue suggests enhancing the OWASP LLM Applications Project by incorporating a Related Patterns functionality, akin to the CAPEC framework. This enhancement would involve creating a glossary that outlines vulnerabilities in a structured manner, featuring sections on exploration, experimentation, and exploitation, similar to CAPEC entries. It also proposes external mappings to other relevant threat classification systems, such as WASC and MITRE's ATT&CK. To move forward, the project team should consider developing a detailed framework that aligns with these suggestions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/230", @@ -1151,6 +1192,7 @@ "node_id": "I_kwDOJjxT18513f0Q", "title": "Enhancement Suggestion: Add RAG to the main diagram", "body": "Retrieval augmented generation (RAG) is technique to enrich LLMs with own data. It has become very popular as it lowers the complexity entry to enriching input in LLM apps, allows for better access controls as opposed to fine tuning, and is known to reduce hallucination (see https://www.securityweek.com/vector-embeddings-antidote-to-psychotic-llms-and-a-cure-for-alert-fatigue/) see also the excellent [Samsung paper on enterprise use of GenAI and the role of RAG](https://arxiv.org/ftp/arxiv/papers/2309/2309.01105.pdf).\r\n\r\nRAG creates its own security risks and adds to the attack surface. yet, the diagram only includes fine tuning, **We should add explicitly RAG as part of our diagram and annotate it with related LLM items**. \r\nSome useful links:\r\n**architectural approaches**\r\nAzure: https://github.com/Azure/GPT-RAG \r\nAWS SageMaker: https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-customize-rag.html\r\nAWS Bedrock RAG workshop: https://github.com/aws-samples/amazon-bedrock-rag-workshop\r\n\r\n**security concerns:**\r\n[Security of AI Embeddings explained](https://ironcorelabs.com/ai-encryption/)\r\n[Anonymity at Risk? 
Assessing Re-Identification Capabilities of Large Language Models](https://arxiv.org/abs/2308.11103)\r\n[Embedding Layer: AI (Brace For These Hidden GPT Dangers)](https://predictivethought.com/embedding-layer-ai-brace-for-these-hidden-gpt-dangers/)\r\n", + "summary": "The issue suggests an enhancement to include Retrieval Augmented Generation (RAG) in the main diagram, as it is a valuable technique for enriching LLMs with data. RAG can lower the complexity of using LLM applications, improve access control compared to fine-tuning, and help mitigate hallucination issues. However, it also introduces new security risks.\n\nCurrently, the diagram only reflects fine-tuning, and the proposal is to explicitly integrate RAG, along with annotations related to LLMs. Several resources provided include architectural approaches from Azure and AWS, as well as security concerns related to AI embeddings. \n\nTo address this issue, the main task would be to update the diagram to incorporate RAG and its associated implications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/240", @@ -1182,6 +1224,7 @@ "node_id": "I_kwDOJjxT18513jts", "title": "Create Threat Model and Discuss RAG with its security risks for LLM", "body": "Retrieval augmented generation (RAG) is a technique to enrich LLMs with the apps/org own data. It has become very popular as it lowers the complexity entry to enriching input in LLM apps, allows for better access controls as opposed to fine-tuning, and is known to reduce hallucination (see https://www.securityweek.com/vector-embeddings-antidote-to-psychotic-llms-and-a-cure-for-alert-fatigue/) see also the excellent [Samsung paper on enterprise use of GenAI and the role of RAG](https://arxiv.org/ftp/arxiv/papers/2309/2309.01105.pdf).\r\n\r\n**Currently, we have occasional references to RAG but we should create a threat model and discuss this on its own and the impact on the LLM top** 10. As RAG becomes a distinct enterprise PATTERN it also creates its own security risks and expands the attack surface with:\r\nThis includes \r\n- a second process of generating embeddings that can vary in complexity and can be using the main or secondary LLM\r\n- vector database\r\n- the use of embeddings in prompts and responses \r\n- use of other services (eg Azure Cognitive Services) or specialised plugins, for instance, the OpenAI upsert retrieval plugin for semantic search plugin(https://github.com/openai/chatgpt-retrieval-plugin)\r\n\r\n\r\n### _Some useful links_**\r\n**architectural approaches**\r\nAzure: https://github.com/Azure/GPT-RAG\r\nAWS SageMaker: https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-customize-rag.html\r\nAWS Bedrock RAG workshop: https://github.com/aws-samples/amazon-bedrock-rag-workshop\r\n\r\n**security concerns**\r\n[Security of AI Embeddings explained](https://ironcorelabs.com/ai-encryption/)\r\n[Anonymity at Risk? Assessing Re-Identification Capabilities of Large Language Models](https://arxiv.org/abs/2308.11103)\r\n[Embedding Layer: AI (Brace For These Hidden GPT Dangers)](https://predictivethought.com/embedding-layer-ai-brace-for-these-hidden-gpt-dangers/)", + "summary": "The issue discusses the need to create a comprehensive threat model for Retrieval Augmented Generation (RAG), a technique that enhances large language models (LLMs) with organizational data. 
Despite its benefits in simplifying access to data, improving access controls, and reducing hallucinations, RAG introduces unique security risks and expands the attack surface. Key concerns include the complexity of generating embeddings, the security of vector databases, and the integration of external services and plugins.\n\nTo address these issues, it is recommended to systematically evaluate the security implications of RAG and outline a detailed threat model that considers its impact on LLMs. This would involve analyzing the security of architectural approaches and reviewing existing literature on potential vulnerabilities related to AI embeddings.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/241", @@ -1199,9 +1242,9 @@ 17 ], "labels": [ - 7, + 19, 12, - 19 + 7 ] } }, @@ -1214,6 +1257,7 @@ "node_id": "I_kwDOJjxT18516Vr_", "title": "LLM07 - Insecure Plugin Design - Mitigation/How to Prevent Enhancements", "body": "I believe [LLM07](https://llmtop10.com/llm07/) could benefit from some or all of these mitigation methods to be included in the vulnerability:\r\n\r\n- Human in the Loop: A plugin should not be able to invoke another plugin (by default), especially plugins with high stakes operations and to ensure that generated content meets quality and ethical standards.\r\n- It should be transparent to the user which plugin will be invoked, and what data is sent to it. Possibly even allow modifying the data before its sent.\r\n- A security contract and threat model for plugins should be created, so we can have a secure and open infrastructure where all parties know what their security responsibilities are. \r\n- An LLM application must assume plugins cannot be trusted (e.g. direct or indirect prompt injection), and similarly plugins cannot blindly trust LLM application invocations (example: confused deputy attack)\r\n- Regularly perform red teaming and model serialization attacks with thorough benchmarking and reporting of input and outputs.\r\n- Plugins that handle PII and/or impersonate the user are high stakes.\r\n- Isolation. Discussed before that follows a Kernel LLM vs. Sandbox LLMs could help. \r\n\r\nThese mitigation techniques are primarily focused towards combatting indirect prompt injection, but should pretty much be a defacto standard. I also think there should be some sort of statement or wording such as \"**Plugins should never be inheriently trusted**\".\r\n\r\nResource and inspiration kudos to [embracethered](https://embracethered.com/blog/posts/2023/chatgpt-cross-plugin-request-forgery-and-prompt-injection./).", + "summary": "The issue discusses potential enhancements for mitigating vulnerabilities related to the LLM07 - Insecure Plugin Design category. 
Key suggestions include:\n\n- Implementing a \"Human in the Loop\" approach to prevent plugins from invoking one another by default, especially for high-stakes operations.\n- Ensuring transparency regarding which plugins are invoked and what data is transmitted, allowing users to modify this data.\n- Establishing a security contract and threat model to clarify security responsibilities among all parties involved.\n- Assuming that plugins cannot be fully trusted, and vice versa, to prevent issues like prompt injection and confused deputy attacks.\n- Conducting regular red teaming and model serialization attacks with comprehensive benchmarking and reporting.\n- Recognizing that plugins dealing with Personally Identifiable Information (PII) or impersonation are particularly high-risk.\n- Considering isolation strategies, such as separating Kernel LLMs from Sandbox LLMs.\n\nThese measures primarily address indirect prompt injection and should be adopted as standard practice. A proposed statement emphasizes that \"Plugins should never be inherently trusted.\" Further exploration and implementation of these techniques are encouraged.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/242", @@ -1244,6 +1288,7 @@ "node_id": "I_kwDOJjxT1852ArkM", "title": "Enhancement Suggestion: Enhance Diagram to Include LLM Architecture Types and Technologies", "body": "To enhance our project and the next diagram artifact version we added for v1.1, I think we should include information about traditional REST API and websocket architecture into the diagram and how this interoperates with LLM state within Generative AI.\r\n\r\nThis architecture can be recon'd by an adversary in attempt to perform prompt injection or other techniques to exploit vulnerabilities within the LLM application.\r\n\r\n**Reference Examples:**\r\n\r\n- [amigoscode](https://amigoscode.com/)\r\n![API-Architecture-Designs-amigoscode](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/assets/104169244/2d512d6a-a040-4869-907f-0fbcddacc1c3)\r\n- https://livebook.manning.com/book/building-chatbots-with-microsoft-bot-framework-and-nodejs/chapter-6/v-4/", + "summary": "The issue proposes an enhancement for the project's next diagram artifact version by incorporating information on traditional REST API and websocket architecture. This addition aims to illustrate how these architectures interact with the LLM state within Generative AI, highlighting potential vulnerabilities, such as prompt injection. It suggests referencing existing examples to inform these enhancements. To address this, the team may need to analyze and integrate relevant architecture types and their implications for security in the upcoming diagram version.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/243", @@ -1275,6 +1320,7 @@ "node_id": "I_kwDOJjxT1852Kpmq", "title": "Add the definitions to the documents/site", "body": "The definitions currently live in the wiki, and aren't included in either the PDF or the website. We should fix that. ", + "summary": "The issue highlights the need to incorporate definitions from the wiki into the official documents and website, as they are currently missing from both the PDF and online content. 
To address this, the definitions should be extracted from the wiki and integrated into the relevant sections of the documents and website.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/244", @@ -1305,6 +1351,7 @@ "node_id": "I_kwDOJjxT1852i51h", "title": "LLM-01: Adding example of Prompt injection in ReAct agents", "body": "I propose to add a prompt injection attack scenario specific to organizations that are looking into implementing autonomous ReAct based agents to expose to customers, such as those that can be be built with Langchain. In these agents, it is possible to leverage prompt injection to inject Thoughts, Actions and Observations that alter the behaviour of the agent, but not via a direct jailbreak (i.e.: deviation from the system prompt) - instead they do so by altering the external reality of the agent.\r\n\r\n**Reference and attack example:**\r\nhttps://labs.withsecure.com/publications/llm-agent-prompt-injection\r\n\r\nProbably better understood in action in this short 30-sec video: https://www.linkedin.com/posts/withsecure_prompt-injection-for-react-llm-agents-ugcPost-7125756992341618688-ZFQ5\r\n\r\nSuggested text for the example to add: \"_An autonomous LLM agent on a web shop assists users in managining orders. It can access order details and issue refunds by using a set of tools via the ReAct (Reason+Act) framework. An attacker injects forged Thoughts, Actions and Observations into the LLM context via prompt injection. This tricks the LLM into thinking that the user has ordered an item they have not and that this item is eligible for a refund. Under these false premises, the agent proceeds to issue a refund to the malicious user for an order they never placed._\"\r\n\r\nIt's debatable though where such a scenario would fit: it's an injection attack, but not a direct jail break. The \"LLM08: Excessive Agency\" vulnerability definitely has an overlap, so does the the \"LLM07: Insecure Plugin Design\" to an extent. In this scenario mentioned by the article the agent should not be able to issue a refund if the original order doesn't exist or if information doesn't match the records.\r\n\r\n\r\n", + "summary": "The issue proposes adding an example of a prompt injection attack tailored for organizations implementing autonomous ReAct-based agents, particularly those built with Langchain. The attack involves manipulating the agent's behavior by altering its understanding of external reality rather than using a direct jailbreak. An illustrative scenario is provided where an attacker injects false information into a web shop's order management agent, leading it to issue a refund for a non-existent order. The discussion raises questions about the appropriate categorization of this attack, noting potential overlaps with existing vulnerabilities. To enhance security, it is suggested that agents should validate information against actual records before executing actions like refunds. 
\n\nIt may be helpful to incorporate this example into the documentation or relevant vulnerability discussions to better illustrate the risks associated with prompt injection in ReAct agents.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/246", @@ -1335,6 +1382,7 @@ "node_id": "I_kwDOJjxT1855Ah2v", "title": "LLM05 - dead link", "body": "**Description**, there is a dead link (https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/blob/main/1_1_vulns/InsecurePluginDesign.md) Insecure Plugin Design.\r\n\r\n**Prevention and Mitigation Strategies**, **item number 7**, there is a dead link (https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/blob/main/1_0_vulns/Training_Data_Poisoning.md) Training Data Poisoning.", + "summary": "The issue reports dead links in the documentation for \"Insecure Plugin Design\" and \"Training Data Poisoning.\" The links provided are non-functional, which likely hinders access to important information. To resolve the issue, the links should be updated or fixed to direct users to the correct resources.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/254", @@ -1367,6 +1415,7 @@ "node_id": "I_kwDOJjxT1856-VBt", "title": "LLM10 - Additional bullet point that doesn't exist in Markdown files", "body": "In LLM10 in the Common Examples of Vulnerability section of LLM10 there is an entire bullet point in there that doesn't exist in the markdown files for v1.1\r\n\r\n![Screenshot 2024-01-02 at 18 00 08](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/assets/759591/e3ffbfa3-a57f-435f-aca1-7bf25142a1a5)\r\n", + "summary": "The issue highlights a discrepancy where a bullet point present in the \"Common Examples of Vulnerability\" section of LLM10 is missing from the markdown files for version 1.1. To address this, it may be necessary to update the markdown files to include the missing bullet point to ensure consistency.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/258", @@ -1399,6 +1448,7 @@ "node_id": "I_kwDOJjxT1856-VSh", "title": "LLM03 - Numbering of Prevention and Mitigation Strategies in PDF does not match Markdown", "body": "The Prevention and Mitigation Strategies in the Markdown file for LLM10 goes up to higher however in the PDF it only goes up to 7 and has more sub points. \r\n\r\nhttps://github.com/OWASP/www-project-top-10-for-large-language-model-applications/blob/main/1_1_vulns/LLM03_TrainingDataPoisoning.md\r\n\r\n![Screenshot 2024-01-02 at 18 17 24](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/assets/759591/3875f45f-b9c1-4d9e-920a-f34ffadb5be2)\r\n", + "summary": "The issue highlights a discrepancy between the numbering of Prevention and Mitigation Strategies in the Markdown file and the corresponding PDF for LLM03. The Markdown document lists strategies beyond number 7, while the PDF only goes up to 7 and includes additional sub points. 
To resolve this, the numbering in the PDF should be aligned with the Markdown file to ensure consistency.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/259", @@ -1431,6 +1481,7 @@ "node_id": "I_kwDOJjxT1858fhgF", "title": "Add CloudBorne and CloudJacking Attacks to LLM-05 Supply Chain - CVE-2023-4969", "body": "Remember, an issue is not the place to ask questions. You can use our [Slack channel](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/wiki) for that, or you may want to start a discussion on the [Discussion Board](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/discussions).\r\n\r\n### When reporting an issue, please be sure to include the following:\r\n- [X ] Before you open an issue, please check if a similar issue already exists or has been closed before.\r\n- [X ] A descriptive title and apply the specific **LLM-0-10** label relative to the entry. See our [available labels](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/labels).\r\n- [X ] A description of the problem you're trying to solve, including *why* you think this is a problem\r\n- [X ] If the enhancement changes current behavior, reasons why your solution is better\r\n- [ ] What artifact and version of the project you're referencing, and the location (I.E OWASP site, llmtop10.com, repo)\r\n- [ ] The behavior you expect to see, and the actual behavior\r\n\r\n#### Steps to Reproduce\r\n-------------------------------------------\r\n\r\nNA\r\n\r\n#### What happens?\r\n-------------\r\n\r\nNA\r\n\r\n#### What were you expecting to happen?\r\n----------------------------------\r\n\r\nWithin the current [LLM entry](https://llmtop10.com/llm05/), I think supply-chain needs to cover cloudborne and cloudjacking attacks, IE GPU and cloud providers in further detail which the entry currently does not cover and is applicable to all companies and AI developers who depend on cloud resources.\r\nI added an article of interest below which I feel would back up this entry' resources section.\r\n\r\n#### Any logs, error output, etc?\r\n----------------------------\r\n\r\nNA\r\n\r\n#### Any other comments?\r\n-------------------\r\n[LeftoverLocals: Listening to LLM responses through leaked GPU local memory](https://blog.trailofbits.com/2024/01/16/leftoverlocals-listening-to-llm-responses-through-leaked-gpu-local-memory/)\r\n\r\n- [NA] Slack post link (if relevant)\r\n\r\n#### What versions of hardware and software are you using?\r\n----------------------------------------\r\n\r\nNA", + "summary": "The issue suggests that the current LLM-05 Supply Chain entry (CVE-2023-4969) should be updated to include details about CloudBorne and CloudJacking attacks, specifically in the context of GPU and cloud providers. This addition is deemed important for all companies and AI developers relying on cloud resources, as the topic is currently underrepresented. The submitter has provided a supportive article to enhance the resources section. 
To address this, the relevant updates and references should be incorporated into the existing entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/264", @@ -1448,8 +1499,8 @@ 22 ], "labels": [ - 10, - 17 + 17, + 10 ] } }, @@ -1462,6 +1513,7 @@ "node_id": "I_kwDOJjxT186DHV8Y", "title": "Updating Prompt's logo + others", "body": "Update the Prompt Security logo on the footer across all the site with the attached one\r\nOn this page https://llmtop10.com/contributors/, under 'Core Team' add Prompt Security as the company associated with Itamar Golan\r\nOn this page https://llmtop10.com/contributors/, under 'Contributors' add Prompt Security as the company associated with Lior Drihem\r\nOn this page https://llmtop10.com/contributors/, under 'Organizations', update Prompt's logo as well with the attached one\r\n![Black (1)](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/assets/164334254/e03695a1-19fe-4bbc-8b90-42ae6838f8e5)\r\n![Black Mono (1)](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/assets/164334254/bb81f753-1e05-4b29-b1b3-823ee370da4c)\r\n", + "summary": "The issue involves several updates related to the Prompt Security logo and its association with team members on the contributors page of the website. Specifically, the tasks include: \n\n1. Updating the Prompt Security logo in the footer across the entire site with the provided new logo.\n2. Adding Prompt Security as the company associated with Itamar Golan under the 'Core Team' section on the contributors page.\n3. Adding Prompt Security as the company associated with Lior Drihem under the 'Contributors' section on the same page.\n4. Updating the organization section with the new Prompt logo as well.\n\nTo address this, the necessary updates to the logo and contributor associations need to be implemented on the specified pages.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/277", @@ -1479,8 +1531,8 @@ 28 ], "labels": [ - 14, - 21 + 21, + 14 ] } }, @@ -1493,6 +1545,7 @@ "node_id": "I_kwDOJjxT186EeoHH", "title": "[suggestion enhancement] references of prompt injection -> jailbreaking, call out Many-Shot Jailbreaking explicitly", "body": "Remember, an issue is not the place to ask questions. You can use our [Slack channel](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/wiki) for that, or you may want to start a discussion on the [Discussion Board](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/discussions).\r\n\r\n### When reporting an issue, please be sure to include the following:\r\n- [X] Before you open an issue, please check if a similar issue already exists or has been closed before.\r\n- [X] A descriptive title and apply the specific **LLM-0-10** label relative to the entry. 
See our [available labels](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/labels).\r\n- [X] A description of the problem you're trying to solve, including *why* you think this is a problem\r\n- [X] If the enhancement changes current behavior, reasons why your solution is better\r\n- [X] What artifact and version of the project you're referencing, and the location (I.E OWASP site, llmtop10.com, repo)\r\n- [X] The behavior you expect to see, and the actual behavior\r\n\r\n#### Steps to Reproduce\r\n-------------------------------------------\r\n1. NA\r\n2. …\r\n3. …\r\n\r\n#### What happens?\r\n-------------\r\n\r\n`2_0_vulns/LLM01_PromptInjection.md` [here](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/blob/main/2_0_vulns/LLM01_PromptInjection.md)\r\n\r\ni think we should segregate [Basics of prompt injection](https://hiddenlayer.com/research/prompt-injection-attacks-on-llms/#Basics-of-prompt-injection), [Jailbreaking](https://hiddenlayer.com/research/prompt-injection-attacks-on-llms/#Jailbreaking), [Prompt Leaking](https://hiddenlayer.com/research/prompt-injection-attacks-on-llms/#Prompt-Leaking), [Prompt Hijacking](https://hiddenlayer.com/research/prompt-injection-attacks-on-llms/#Prompt-Hijacking) and [Indirect Injections](https://hiddenlayer.com/research/prompt-injection-attacks-on-llms/#Indirect-Injections) as separate entities\r\n\r\nsince context windows are now longer and more powerful, Many-Shot Jailbreaking is a common technique which was not the case back in `v1.0` of our project and therefore i think it should be called out as a technique or at least a reference\r\n\r\n#### What were you expecting to happen?\r\n----------------------------------\r\n\r\n#### Any logs, error output, etc?\r\n----------------------------\r\n\r\n#### Any other comments?\r\n-------------------\r\n\r\nPosted in #team-llm-promptinjection [here](https://owasp.slack.com/archives/C05FHTB7QH0/p1712142327925279)\r\n\r\nSolid references:\r\n- https://hiddenlayer.com/research/prompt-injection-attacks-on-llms/\r\n- https://www.anthropic.com/research/many-shot-jailbreaking\r\n\r\n![image](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/assets/104169244/db07dda7-76c3-4390-bb7a-570643323891)\r\n", + "summary": "The issue suggests enhancing the documentation by explicitly separating different types of prompt injection attacks, including Basics of Prompt Injection, Jailbreaking, Prompt Leaking, Prompt Hijacking, and Indirect Injections. It emphasizes the importance of recognizing Many-Shot Jailbreaking as a significant technique due to advancements in context windows. To address this, the documentation should be updated to acknowledge and detail this technique as a distinct reference. \n\nAction items may include revising the relevant documentation to incorporate these suggestions and ensure clarity on the different attack types.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/281", @@ -1521,6 +1574,7 @@ "node_id": "I_kwDOJjxT186FNc3W", "title": "Extend LLM-04: RAG poisoning with glitch tokens causes DoS", "body": "Remember, an issue is not the place to ask questions. 
You can use our [Slack channel](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/wiki) for that, or you may want to start a discussion on the [Discussion Board](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/discussions).\r\n\r\n### When reporting an issue, please be sure to include the following:\r\n- [x] Before you open an issue, please check if a similar issue already exists or has been closed before.\r\n- [x] A descriptive title and apply the specific **LLM-0-10** label relative to the entry. See our [available labels](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/labels).\r\n- [x] A description of the problem you're trying to solve, including *why* you think this is a problem\r\n- [x] If the enhancement changes current behavior, reasons why your solution is better\r\n- [x] What artifact and version of the project you're referencing, and the location (I.E OWASP site, llmtop10.com, repo)\r\n- [x] The behavior you expect to see, and the actual behavior\r\n\r\n#### Steps to Reproduce\r\n-------------------------------------------\r\n1. NA\r\n2. …\r\n3. …\r\n\r\n#### What happens?\r\n-------------\r\n`2_0_vulns/LLM04_ModelDoS.md` does not refer to DoS through glitch tokens injected into the prompt from a RAG solution. If a malicious actor is able to introduce broadly relevant information to the RAG database which includes glitch tokens for the given model that is used they are able to effectively run a denial of service attack for all users of the LLM.\r\n\r\nThe main issue is that there is no clear indication which token caused the model to glitch so there is no obvious way to automatically remediate such issues. Enterprises build large RAG databases from (mostly) user generated content (i.e. Confluence / SharePoint / ... ) and also update this content frequently. A malicious actor could therefore easily introduce new content to the RAG database, including but not limited to glitch tokens which effectively cause a Denial of Service situation for all end users of the application.\r\n\r\n\r\n#### What were you expecting to happen?\r\n----------------------------------\r\n…\r\n\r\n#### Any logs, error output, etc?\r\n----------------------------\r\n\r\n\r\n#### Any other comments?\r\n-------------------\r\nA \"normal\" glitch token attack doesn't pose a significant threat as it only renders the current user session / context unusable. However through a poisoned RAG database a malicious user can inject these tokens into some/many/most/ if not all conversations, thus causing a broad service outage.\r\n\r\n#### Sources talking about the issue\r\n-------------------\r\n1. https://www.youtube.com/watch?v=WO2X3oZEJOA\r\n2. https://www.lesswrong.com/posts/aPeJE8bSo6rAFoLqg/solidgoldmagikarp-plus-prompt-generation\r\n3. https://www.lesswrong.com/posts/8viQEp8KBg2QSW4Yc/solidgoldmagikarp-iii-glitch-token-archaeology\r\n\r\n- [x] Slack post link (if relevant)\r\nhttps://owasp.slack.com/archives/C05FN2GDUE7/p1712761308419699\r\n\r\n#### What versions of hardware and software are you using?\r\n----------------------------------------\r\n**Operating System:** …\r\n**Browser:** …\r\n\r\n- [x] Chrome\r\n- [x] Firefox\r\n- [x] Edge\r\n- [x] Safari 11\r\n- [x] Safari 10\r\n- [x] IE 11\r\n", + "summary": "The issue discusses the vulnerability related to the LLM-04 model, specifically how RAG poisoning with glitch tokens can lead to a denial of service (DoS) for all users. 
It highlights that malicious actors can inject glitch tokens into the RAG database, which can disrupt the model's functionality across multiple user sessions. The current documentation does not address this potential exploitation, leaving a gap in understanding how to identify or remediate such attacks effectively. It suggests the need for enhancements to the documentation and possibly the model to mitigate these risks. Addressing this vulnerability is crucial to safeguard enterprise applications that rely on user-generated content for their RAG databases.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/283", @@ -1538,8 +1592,8 @@ 30 ], "labels": [ - 17, - 24 + 24, + 17 ] } }, @@ -1552,6 +1606,7 @@ "node_id": "I_kwDOJjxT186FWvRN", "title": "setup automation for triage of issues against the project board", "body": "Remember, an issue is not the place to ask questions. You can use our [Slack channel](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/wiki) for that, or you may want to start a discussion on the [Discussion Board](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/discussions).\r\n\r\n### When reporting an issue, please be sure to include the following:\r\n- [X] Before you open an issue, please check if a similar issue already exists or has been closed before.\r\n- [X] A descriptive title and apply the specific **LLM-0-10** label relative to the entry. See our [available labels](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/labels).\r\n- [X] A description of the problem you're trying to solve, including *why* you think this is a problem\r\n- [X] If the enhancement changes current behavior, reasons why your solution is better\r\n- [X] What artifact and version of the project you're referencing, and the location (I.E OWASP site, llmtop10.com, repo)\r\n- [X] The behavior you expect to see, and the actual behavior\r\n\r\n#### Steps to Reproduce\r\n-------------------------------------------\r\n1. NA\r\n2. …\r\n3. …\r\n\r\n#### What happens?\r\n-------------\r\nright now, we currently don't have any automation for triage against our issue board which is slowing down inefficiency, productivity and not working to its full capability\r\n\r\n#### What were you expecting to happen?\r\n----------------------------------\r\ni'm going to setup automation (ie assignment of issues based on labels) to members of the core team to enhance efficiency\r\n\r\n#### Any logs, error output, etc?\r\n----------------------------\r\nNA\r\n\r\n#### Any other comments?\r\n-------------------\r\nhere is my prior PR for the GH issue template\r\n- https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/pull/256\r\n\r\n- [X] Slack post link (if relevant)\r\nhttps://owasp.slack.com/archives/C26MQBZ0B/p1712754527768819\r\nhttps://owasp.slack.com/archives/C05CM6DUZ1D/p1712754555606069\r\nneed github PAT/fine-grained token from OWASP org admins\r\n\r\n\"image\"\r\n", + "summary": "The issue highlights the lack of automation for triaging issues on the project board, which is currently hindering efficiency and productivity. The author plans to implement automation, specifically by assigning issues based on labels to core team members. To proceed, there is a need for a GitHub Personal Access Token (PAT) or fine-grained token from OWASP organization administrators to enable the automation setup. 
It is suggested to address the automation requirement and obtain the necessary permissions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/285", @@ -1580,6 +1635,7 @@ "node_id": "I_kwDOJjxT186FfGU9", "title": "LLM-01 - Add canary tokens to Prevention and Mitigation Strategies", "body": "Remember, an issue is not the place to ask questions. You can use our [Slack channel](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/wiki) for that, or you may want to start a discussion on the [Discussion Board](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/discussions).\r\n\r\n### When reporting an issue, please be sure to include the following:\r\n- [x] Before you open an issue, please check if a similar issue already exists or has been closed before.\r\n- [x] A descriptive title and apply the specific **LLM-0-10** label relative to the entry. See our [available labels](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/labels).\r\n- [x] A description of the problem you're trying to solve, including *why* you think this is a problem\r\n- [x] If the enhancement changes current behavior, reasons why your solution is better\r\n- [x] What artifact and version of the project you're referencing, and the location (I.E OWASP site, llmtop10.com, repo)\r\n- [x] The behavior you expect to see, and the actual behavior\r\n\r\n#### Steps to Reproduce\r\n-------------------------------------------\r\n1. …\r\n2. …\r\n3. …\r\n\r\n#### What happens?\r\n-------------\r\nNot included\r\n\r\n#### What were you expecting to happen?\r\n----------------------------------\r\nInclude usage of canary tokens as a recommended mitigation strategy for prompt injection. Canary tokens can be used to detect system prompt leakage and to verify system prompt effectiveness / adherence.\r\n\r\n##### Proposed language\r\n--------------------------\r\n```\r\n6. Dynamically add a synthetic canary token to the model's system prompt and scan the models output for the canary token. If the token is leaked in the generation output this is a strong indicator for prompt injection that aims to dump the system prompt itself.\r\n7. Dynamically add a synthetic canary token to the model's system prompt and specifically instruct the model to include this token (must not be the same token as in 6.) at the end of every response. Check if the token is included in the models output message and remove the token before sending the response to the front-end component. If the token is not present, the model's system prompt was most likely overridden through means of prompt injection.\r\n```\r\n\r\n#### Any logs, error output, etc?\r\n----------------------------\r\n\r\n\r\n#### Any other comments?\r\n-------------------\r\n…\r\n\r\n- [x] Slack post link (if relevant)\r\nhttps://owasp.slack.com/archives/C05FHTB7QH0/p1712913754686959", + "summary": "The issue suggests adding canary tokens to the Prevention and Mitigation Strategies for prompt injection attacks. The proposal includes two main strategies: dynamically adding a synthetic canary token to the model's system prompt to detect prompt leakage, and instructing the model to include a different token in every response to verify the integrity of the system prompt. If the tokens are leaked or absent, it indicates potential prompt injection. 
To address this, the implementation of these canary tokens as a recommended mitigation strategy should be explored.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/288", @@ -1608,6 +1664,7 @@ "node_id": "I_kwDOJjxT186FnBJr", "title": "Drive standard spelling from Terminology glossary", "body": "Applicable to https://llmtop10.com/ and **Security & Governance Checklist** (from [here](https://owasp.org/www-project-top-10-for-large-language-model-applications/llm-top-10-governance-doc/LLM_AI_Security_and_Governance_Checklist-v1.1.pdf)).\r\n\r\nKey terms are not expressed consistently. Suggestion to establish a canonical spelling. Consider that first use might be different than subsequent use (for example, perhaps might want \"Large Language Model (LLM)\" for first use then \"LLM\" for subsequent use in varying scopes). \r\n\r\n**From the Top 10 website:**\r\n\r\n\"LLM\" variants include at least the following:\r\n1. Large Language Model in LLM01 and LLM04\r\n2. LLM in LLM02 and LLM10\r\n3. LLM's (for plural, with apostrophe) in LLM06 \r\n4. LLMs (for plural, no apostrophe) in LLM09\r\n \r\n**From the Security & Governance Checklist:**\r\n\r\n\"LLM\" variants include at least the following:\r\n1. Large Language Models (LLMs)\r\n2. LLMs (Large Language Models)\r\n3. LLM\r\n4. large language model\r\n6. Large Language Model\r\n\r\nThe varied use of LLM as a term is but one example. Non-exhaustive additional examples include: \r\n1. GenAI vs generative artificial intelligence (GenAI) vs generative AI\r\n3. AIML vs AI/ML\r\n\r\n~The terminology section could establish a preferred style. This need not preclude reestablishing terminology in context as makes sense, but suggesting it simply guide the preferred style among the many possibilities.~ Per comment from @GangGreenTemperTatum, there is already a [terminology section](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/wiki/Definitions) on the wiki. So the remaining point is that terms are expressed inconsistently.", + "summary": "The issue highlights the inconsistency in the usage of key terms related to \"Large Language Models\" (LLM) across the LLM Top 10 website and the Security & Governance Checklist. It suggests establishing a canonical spelling for these terms, noting that the first use may differ from subsequent mentions (e.g., \"Large Language Model (LLM)\" initially, followed by \"LLM\"). Specific examples of inconsistencies are provided, including variations in spelling and phrasing for \"LLM\" and related terms like GenAI and AIML. The suggestion is to create a preferred style guide within the existing terminology section to ensure consistency in term usage. It is recommended that a review of the terminology section be conducted to standardize expressions across the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/291", @@ -1637,6 +1694,7 @@ "node_id": "I_kwDOJjxT186FoHkV", "title": "ads test from https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/pull/292", "body": "ads test from https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/pull/292", + "summary": "The issue discusses the implementation of an ads test related to a previous pull request. 
It appears that there may be a need for clarification or further development of the test to ensure it aligns with the project's objectives. To address this, reviewing the relevant pull request and evaluating the current testing framework could be beneficial.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/293", @@ -1665,6 +1723,7 @@ "node_id": "I_kwDOJjxT186IrVLb", "title": "Translation Effort - Language-specific links to other OWASP resources", "body": "While translating the LLM Top 10 into German, @johannhartmann and I noticed that there are a few links embedded within the text (not just the references).\r\n\r\nFor example, LLM05 links to [A06:2021 - Vulnerable and Outdated Components](https://owasp.org/Top10/A06_2021-Vulnerable_and_Outdated_Components/). It also links to https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/blob/main/1_0_vulns/Training_Data_Poisoning.md. Furthermore, LLM06 links to the wiki at https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/wiki/Definitions.\r\n\r\nThis raises two questions:\r\n\r\n- Should links to other OWASP documents point to their English originals or to the translations (where available)?\r\n- Are there canonical links, or do we just point to the latest/current version? As some of them contain versions, this is a bit unclear.\r\n\r\nI couldn't find any guidance in the documentation regarding a common approach. It seems like the Spanish translation has kept the links as they are in the English version.\r\n\r\nHow should we proceed so that it is consistent across all translations? \r\n\r\n(Original post in [Slack](https://owasp.slack.com/archives/C05956H7R8R/p1715608819205709))", + "summary": "The issue highlights the need for clarity regarding the handling of links to other OWASP resources while translating the LLM Top 10 into German. It raises two main questions: whether links should point to the original English documents or available translations, and whether to use canonical links or the latest version of the documents. There’s a lack of guidance in the documentation on this matter, and the Spanish translation has maintained the original English links. A consistent approach across all translations is necessary. \n\nTo resolve this, a decision needs to be made on the linking strategy for translations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/310", @@ -1679,8 +1738,8 @@ "author": 32, "repository": 151, "assignees": [ - 22, - 26 + 26, + 22 ], "labels": [] } @@ -1690,10 +1749,11 @@ "pk": 60, "fields": { "nest_created_at": "2024-09-11T19:17:10.526Z", - "nest_updated_at": "2024-09-11T19:17:10.526Z", + "nest_updated_at": "2024-09-13T15:00:46.205Z", "node_id": "I_kwDOJjxT186QMuv1", "title": "cannot access editable source of v2 diagram - www-project-top-10-for-large-language-model-applications/2_0_vulns/artifacts /v2.0 - OWASP Top 10 for LLM Applications and Generative AI - LLM Application HLD - Presentation DLD.jpeg", "body": "Remember, an issue is not the place to ask questions. 
You can use our [Slack channel](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/wiki) for that, or you may want to consult the following Slack channels:\r\n\r\n- [#project-top10-for-llm](https://owasp.slack.com/archives/C05956H7R8R)\r\n- [#team-llm-web](https://owasp.slack.com/archives/C06RXVCQB1C)\r\n- [#team-llm_diagrams_and_visuals](https://owasp.slack.com/archives/C05L7TW8VCY)\r\n- Each vulnerabilities has its own dedicated Slack channel for discussions in the format of `#team-llm0X`, I.E ([#team-llm03_data_and_model_poisoning](https://owasp.slack.com/archives/C05F7JWFYBU))\r\n\r\n### When reporting an issue, please be sure to include the following:\r\n- [ ] Before you open an issue, please check if a similar issue already exists or has been closed before.\r\n- [ ] A descriptive title and apply the specific **LLM-0-10** label relative to the entry. See our [available labels](https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/labels).\r\n- [ ] A description of the problem you're trying to solve, including *why* you think this is a problem\r\n- [ ] If the enhancement changes current behavior, reasons why your solution is better\r\n- [ ] What artifact and version of the project you're referencing, and the location (I.E OWASP site, llmtop10.com, repo)\r\n- [ ] The behavior you expect to see, and the actual behavior\r\n\r\n#### Steps to Reproduce\r\n-------------------------------------------\r\n1. …\r\n2. …\r\n3. …\r\n\r\n#### What happens?\r\n-------------\r\n…\r\n\r\n#### What were you expecting to happen?\r\n----------------------------------\r\n…\r\n\r\n#### Any logs, error output, etc?\r\n----------------------------\r\n\r\n\r\n#### Any other comments?\r\n-------------------\r\n…\r\n\r\n- [ ] Slack post link (if relevant)\r\n\r\n#### What versions of hardware and software are you using?\r\n----------------------------------------\r\n**Operating System:** …\r\n**Browser:** …\r\n\r\n- [ ] Chrome\r\n- [ ] Firefox\r\n- [ ] Edge\r\n- [ ] Safari 11\r\n- [ ] Safari 10\r\n- [ ] IE 11\r\n", + "summary": "The issue reports an inability to access the editable source of a specific diagram related to the OWASP Top 10 for LLM Applications. 
The user is directed to use Slack channels for questions and to follow specific guidelines for reporting issues, including ensuring similar issues haven't been previously addressed, providing descriptive titles and labels, and detailing the problem, expected behavior, and any relevant logs.\n\nTo address this issue, it may be necessary to check the accessibility of the diagram's source or provide clearer instructions on how to access the editable version.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/www-project-top-10-for-large-language-model-applications/issues/388", @@ -1718,10 +1778,11 @@ "pk": 61, "fields": { "nest_created_at": "2024-09-11T19:17:38.472Z", - "nest_updated_at": "2024-09-11T19:17:38.472Z", + "nest_updated_at": "2024-09-13T15:01:08.504Z", "node_id": "I_kwDOJZbJH86L0MUq", "title": "ERROR: No matching distribution found for pywin==306", "body": "Hello,\r\n\r\nI had an error while installing requirements.txt, see the screenshot below:\r\n\r\n![image](https://github.com/OWASP/SAPKiln/assets/32524544/2dacd340-1737-44fb-9d91-ea0a8d068a5f)\r\n\r\nhow can I solve it?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SAPKiln/issues/1", @@ -1744,10 +1805,11 @@ "pk": 62, "fields": { "nest_created_at": "2024-09-11T19:17:45.802Z", - "nest_updated_at": "2024-09-11T19:17:45.802Z", + "nest_updated_at": "2024-09-13T15:01:14.786Z", "node_id": "I_kwDOJZXuOs6Bythd", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. It suggests that including a detailed description will improve understanding for users and contributors. To address this, a clear and concise project description should be drafted and added to the repository.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-blockchain-appsec-standard/issues/1", @@ -1770,10 +1832,11 @@ "pk": 63, "fields": { "nest_created_at": "2024-09-11T19:18:21.161Z", - "nest_updated_at": "2024-09-11T19:18:21.161Z", + "nest_updated_at": "2024-09-13T15:01:39.679Z", "node_id": "I_kwDOJTHMcM6Bytfk", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description. To address this, it would be helpful to draft a clear and concise overview of the project's purpose, features, and goals.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-minefield/issues/2", @@ -1796,10 +1859,11 @@ "pk": 64, "fields": { "nest_created_at": "2024-09-11T19:18:41.667Z", - "nest_updated_at": "2024-09-11T19:18:41.667Z", + "nest_updated_at": "2024-09-13T15:01:56.557Z", "node_id": "I_kwDOJNCEE86Byta2", "title": "Please add a description to this project", "body": "", + "summary": "A request has been made to include a project description. It would be helpful to provide a clear and concise overview of the project's purpose, features, and functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-cognito-catastrophe/issues/2", @@ -1826,6 +1890,7 @@ "node_id": "I_kwDOJJEsKM5qX4p_", "title": "Support VSCode editor", "body": "### Plugin release for VSCode\r\nIDE-Vulscanner is projected to be ide agonistic and supports the IntelliJ plugin for now. Given developers mostly use VSCode and IntelliJ ide's for day jobs it would be a good idea to release a VSCode plugin. 
", + "summary": "The issue suggests developing a VSCode plugin for IDE-Vulscanner, which currently only supports IntelliJ. Since many developers primarily use VSCode, creating a plugin for this editor would enhance its usability. The next steps involve assessing the requirements and implementing a VSCode-compatible version of the plugin.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ide-vulscanner/issues/17", @@ -1857,6 +1922,7 @@ "node_id": "I_kwDOJJEsKM5qX4-F", "title": "Support multi language scan", "body": "### Enhance the plugin for multi-language scan \r\nAs of now plugin supports maven-based project scans and the ask is to support well-known program languages like Go, Python, Java, Gradle-based projects, etc.", + "summary": "The issue requests an enhancement of the plugin to enable multi-language scanning capabilities. Currently, the plugin only supports Maven-based projects, and the proposal is to extend support to include popular programming languages such as Go, Python, Java, and Gradle-based projects. To address this, the development of additional scanning features for these languages is needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ide-vulscanner/issues/18", @@ -1888,6 +1954,7 @@ "node_id": "I_kwDOJJEsKM5qX5On", "title": "Support vulnerable code linting", "body": "### Vul code linting\r\nThe bigger picture of this plugin is to support compile time linting of any code which might cause vulnerability to overall software.", + "summary": "The issue discusses the need for a plugin that enables compile-time linting to identify potential vulnerabilities in code. The goal is to enhance software security by catching issues early in the development process. To address this, the plugin should be developed to effectively analyze code for vulnerabilities during compilation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ide-vulscanner/issues/19", @@ -1915,10 +1982,11 @@ "pk": 68, "fields": { "nest_created_at": "2024-09-11T19:18:58.756Z", - "nest_updated_at": "2024-09-11T19:18:58.756Z", + "nest_updated_at": "2024-09-13T15:02:05.333Z", "node_id": "I_kwDOJJEsKM6Gx0SR", "title": "Add IntelliJ 2024.1 support", "body": "I would really love to also use the plugin in IntelliJ 2024.1\r\n\r\nCurrently it's not listed as available for this version. Haven't tried installing it manually tho.", + "summary": "The issue requests support for the plugin in IntelliJ 2024.1, as it is currently not listed as compatible with that version. It might be beneficial to test whether manual installation works, but official support would be ideal. Consider updating the plugin to ensure compatibility with IntelliJ 2024.1.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ide-vulscanner/issues/24", @@ -1941,10 +2009,11 @@ "pk": 69, "fields": { "nest_created_at": "2024-09-11T19:19:15.530Z", - "nest_updated_at": "2024-09-11T19:19:15.530Z", + "nest_updated_at": "2024-09-13T15:02:19.017Z", "node_id": "I_kwDOI9Ahm85r2IXl", "title": "Please consider Oracle manipulation", "body": "Hi there,\r\n\r\nThanks for this great repo. After reading the top 10 list, IMHO, please consider to add Oracle manipulation into this (some hacks happened).\r\n\r\nAbout Oracle manipulation:\r\n\r\n# Oracle manipulation\r\n## Desciption\r\nOracle manipulation is an attack that smart contract rely off-chain information on other services called oracles (eg. price). 
If the data was wrong it might lead to abnormal behavior.\r\n\r\n## Impact\r\nMight drain the pool from Defi protocol.\r\n\r\n## Steps to fix\r\nUse a decentralized oracle network, or time-weighted average price feed.\r\n\r\n## Example\r\nThe lending contract use the price from dex oracle, attacker make flash loan and manipulate the token price (drain one asset from the pool). After price manipulation, attacker can make loan from lending pool if the token price source is dex oracle and not validated properly.", + "summary": "The issue discusses the need to address Oracle manipulation in the repository. It highlights that this type of attack can exploit vulnerabilities in smart contracts that rely on off-chain data from oracles, potentially leading to significant losses in DeFi protocols. To mitigate this risk, it suggests implementing a decentralized oracle network or utilizing time-weighted average price feeds. A specific example is provided, illustrating how an attacker could exploit a lending contract by manipulating token prices through a decentralized exchange oracle. It is recommended to consider adding protections against such manipulation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-smart-contract-top-10/issues/4", @@ -1973,6 +2042,7 @@ "node_id": "I_kwDOI1zPn85hrZIL", "title": "Have a vulnerable AI project :-)?", "body": "Hi there!\r\nThis is a great project! Thank you for documenting it so nicely!\r\nTo make it more practical for those that might have trouble understanding some of the sources referenced: will there ever be a “vulnerable AI” project where we can make some of the attacks described more practical :-)?", + "summary": "The issue expresses appreciation for the project and its documentation while suggesting the creation of a \"vulnerable AI\" project. This would allow users to practically engage with the attacks described in the existing documentation. To address this, the team could consider developing a dedicated project focused on showcasing these vulnerabilities in a hands-on manner.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ai-security-and-privacy-guide/issues/6", @@ -1999,6 +2069,7 @@ "node_id": "I_kwDOI1zPn85pBZOf", "title": "Process podcast to see if anything new", "body": "https://www.devseccon.com/the-secure-developer-podcast/ep-134-the-five-pillars-of-mlsecops-with-ian-swanson", + "summary": "The issue discusses the need to process a specific podcast episode to extract any new insights or relevant information. It suggests reviewing the content of the episode titled \"The Five Pillars of MLSecOps\" to identify key takeaways that may be beneficial for the project. To address this, one should listen to the episode and summarize the main points or actionable ideas presented.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ai-security-and-privacy-guide/issues/10", @@ -2025,6 +2096,7 @@ "node_id": "I_kwDOI1zPn85pBZRG", "title": "Process article by Susanna Cox on ML ops controls", "body": "", + "summary": "The issue discusses the need to process an article on ML ops controls. It highlights the importance of implementing proper controls in machine learning operations to ensure efficiency and compliance. 
To move forward, the article should be reviewed and analyzed for key insights that can be incorporated into the existing framework or practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ai-security-and-privacy-guide/issues/11", @@ -2051,6 +2123,7 @@ "node_id": "I_kwDOI1zPn85pDIJy", "title": "Process ENISA presentations of AI security conference June 2023", "body": "eg 2.2-isabel-praca-enisa_aicybersecurityconference_v2", + "summary": "The issue pertains to the need for processing presentations from the ENISA AI Security Conference held in June 2023. Specifically, it references a particular presentation file (2.2-isabel-praca-enisa_aicybersecurityconference_v2) that requires attention. The next steps involve reviewing and possibly summarizing or extracting key insights from the mentioned presentation as part of the overall processing effort.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ai-security-and-privacy-guide/issues/12", @@ -2077,6 +2150,7 @@ "node_id": "I_kwDOI1zPn85rBCXC", "title": "Do gap analysis with responsible AI guide", "body": "https://www.considerati.com/static/default/files/Responsible%20AI%20guide.pdf", + "summary": "The issue discusses the need to conduct a gap analysis using the Responsible AI guide linked in the description. The goal is to identify discrepancies between current practices and the standards outlined in the guide. To address this, a detailed assessment of existing AI practices against the guidelines provided in the document should be performed. This will help in improving adherence to responsible AI principles.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ai-security-and-privacy-guide/issues/14", @@ -2099,10 +2173,11 @@ "pk": 75, "fields": { "nest_created_at": "2024-09-11T19:19:53.240Z", - "nest_updated_at": "2024-09-11T19:19:53.240Z", + "nest_updated_at": "2024-09-13T03:39:12.472Z", "node_id": "I_kwDOI1zPn858tKig", "title": "Controls in table format", "body": "Do we have risks and controls mapping described in table format? ", + "summary": "The issue inquires about the existence of a mapping of risks and controls presented in a table format. It suggests that such a format would be beneficial for clarity and organization. To address this, it may be necessary to create or find a detailed table that outlines the risks alongside their corresponding controls.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-ai-security-and-privacy-guide/issues/35", @@ -2125,10 +2200,11 @@ "pk": 76, "fields": { "nest_created_at": "2024-09-11T19:20:48.056Z", - "nest_updated_at": "2024-09-11T22:50:23.514Z", + "nest_updated_at": "2024-09-13T17:08:17.896Z", "node_id": "I_kwDOIhSPhM5YsL-r", "title": "Getting specific containers that fail the checker rule.", "body": "If any container violates the checker rule, the culprit must be busted. So, better to create a ContainerCheck Flow that will help identify the culprit. \r\n\r\nSteps \r\n1. Creating a Container Check Class\r\n2. Passing the Parent Workload via TinyDB Query Lambdas\r\n3. Collect all the Parent Workloads violation and child containers in a Static Variable or Otherwise. \r\n4. Ignore the TinyDB result and prioritize the ContainerCheck Class output", + "summary": "The issue discusses the need for a system to identify containers that violate a specific checker rule. The proposed solution involves creating a ContainerCheck Flow to pinpoint these violations. 
Key steps include developing a Container Check Class, utilizing TinyDB Query Lambdas to pass the Parent Workload, and collecting violations of Parent Workloads along with their child containers in a static variable. Additionally, it suggests that the output from the ContainerCheck Class should take precedence over the TinyDB results. Implementation of these steps is necessary to enhance the detection of rule violations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/KubeLight/issues/2", @@ -2158,6 +2234,7 @@ "node_id": "I_kwDOIhSPhM5YsMce", "title": "Read Me improvement ", "body": "1. Adding CI badges\r\n2. Adding Sqube Badge - CodeQuality and Security\r\n3. Adding Sponsor badge after permission. \r\n4. Add Documentation link [docs.kubelight.com](docs.kubelight.com)", + "summary": "The issue suggests several improvements for the README file, including the addition of CI badges, a Sqube badge for code quality and security, a sponsor badge pending permission, and a documentation link to docs.kubelight.com. To address this, the specified badges and links should be integrated into the README as outlined.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/KubeLight/issues/3", @@ -2186,6 +2263,7 @@ "node_id": "I_kwDOIhSPhM5YtOYI", "title": "Benchmark Module for Kubelight", "body": "### Description \r\nKubelight requires a new benchmark module that helps to run various benchmarks on cluster configuration.\r\n\r\n#### To Do \r\n- [ ] Create a List of possible benchmarking rules\r\n\r\n\r\n\r\n", + "summary": "The issue discusses the need for a new benchmark module for Kubelight to facilitate running various benchmarks on cluster configurations. The proposed task involves creating a list of possible benchmarking rules to guide the development of this module.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/KubeLight/issues/4", @@ -2214,6 +2292,7 @@ "node_id": "I_kwDOIhSPhM5YydY1", "title": "Kubernetes & Container Security", "body": "", + "summary": "The issue discusses concerns related to security in Kubernetes and container environments. It highlights potential vulnerabilities and the need for better security practices to protect applications running in these settings. Suggested actions may include implementing stricter access controls, regular security audits, and adopting best practices for container image management. Addressing these points can enhance the overall security posture of Kubernetes deployments.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/KubeLight/issues/5", @@ -2240,6 +2319,7 @@ "node_id": "I_kwDOIhSPhM5Y0ZR0", "title": "Exempting rules from default namespaces", "body": "Exempt to run rules in default namespaces \r\nex - default, kube-system. ", + "summary": "The issue discusses the need to exempt certain rules from being applied in default namespaces, specifically the \"default\" and \"kube-system\" namespaces. The proposed solution involves implementing a mechanism that allows these namespaces to bypass the standard rule application. To address this, the implementation of exclusion criteria for these specific namespaces should be considered.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/KubeLight/issues/6", @@ -2266,6 +2346,7 @@ "node_id": "I_kwDOIhSPhM5Y0nti", "title": "Decorator for exception handling in Checker", "body": "1. Add exception handling to all the methods of Container Checker\r\n2. 
Add exception handling so that flow will not be disrupted because of one issue. \r\n3. Global exception is not recommended, still over the remove it, add comment for the same. ", + "summary": "The issue discusses the need to enhance the Container Checker by implementing exception handling across all its methods. The goal is to ensure that the overall flow remains intact even if a single method encounters an error. It also mentions that while global exception handling is not preferred, it should be removed and accompanied by a comment explaining the rationale. \n\nTo address this, the implementation of individual exception handling for each method is required, along with documentation regarding the decision against global exception handling.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/KubeLight/issues/7", @@ -2297,6 +2378,7 @@ "node_id": "I_kwDOIhSPhM5Y5s4p", "title": "Memory and CPU Requests/Limit Check to be user input", "body": "As of now, we only check if the memory or CPU limit is set or not, better would be \r\n1. To take user input to validate the memory and CPU limits.\r\n2. Translate \"M\" and \"m\" for CPU and G, Gi, Mi, and M for RAM. Better to use 1000 conversion units for both.", + "summary": "The issue proposes enhancements to the current memory and CPU limit checks. Specifically, it suggests allowing user input for validating these limits and improving the unit translation process. The proposal includes using consistent conversion units: \"M\" and \"m\" for CPU, and \"G\", \"Gi\", \"Mi\", and \"M\" for RAM, with a preference for 1000 as the conversion standard. Implementation of these changes could improve usability and clarity.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/KubeLight/issues/8", @@ -2321,10 +2403,11 @@ "pk": 83, "fields": { "nest_created_at": "2024-09-11T19:21:03.949Z", - "nest_updated_at": "2024-09-11T19:21:03.949Z", + "nest_updated_at": "2024-09-13T15:03:32.572Z", "node_id": "I_kwDOIbsMfc6BytVG", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description. To address this, a concise and informative description outlining the project's purpose, features, and usage should be created and included.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-application-security-monitoring-standard/issues/1", @@ -2347,10 +2430,11 @@ "pk": 84, "fields": { "nest_created_at": "2024-09-11T19:21:27.404Z", - "nest_updated_at": "2024-09-11T19:21:27.404Z", + "nest_updated_at": "2024-09-13T15:03:51.410Z", "node_id": "I_kwDOITh-Lc6FWu_t", "title": "Associated Github Repo no longer exists", "body": "https://github.com/anil-yelken/Vulnerable-Flask-App\r\n\r\nNo longer exists.", + "summary": "The GitHub issue reports that the repository for the \"Vulnerable Flask App\" is no longer available. 
It may be necessary to either find an alternative repository for similar content or restore the original repository if possible.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-vulnerable-flask-app/issues/1", @@ -2377,6 +2461,7 @@ "node_id": "I_kwDOH7x8X85TQF_x", "title": "Create documentation", "body": "- [x] Depends on https://github.com/commjoen/wrongsecrets-ctf-party/issues/86 and on https://github.com/commjoen/wrongsecrets/issues/453 \r\n\r\nTodos:\r\n- [ ] have github pages in place\r\n- [ ] have helm release process in place\r\n- [ ] Have supportive Docker release scripts in place (both tagged&latest)\r\n- [ ] cleanup the guides to be limited on what we actually have (k8s, Azure, GCP & AWS)\r\n- [ ] document the details required to run K8s\r\n- [ ] document the details required to run AWS\r\n- [ ] document the details required to run GCP\r\n- [ ] document the details required to run Azure\r\n- [ ] document CTFD setup (Automated and manual!)\r\n", + "summary": "Create comprehensive documentation for the project. Follow these steps:\n\n1. Ensure that dependencies from the linked issues are resolved.\n2. Set up GitHub Pages for hosting the documentation.\n3. Establish a Helm release process to manage deployments effectively.\n4. Create Docker release scripts for both tagged and latest images to streamline container management.\n5. Clean up existing guides to focus on supported platforms: Kubernetes (K8s), Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS).\n6. Document specific configurations and requirements for running Kubernetes.\n7. Provide detailed instructions for setting up AWS.\n8. Document GCP setup requirements.\n9. Outline Azure setup instructions.\n10. Include a section on setting up CTFD, covering both automated and manual processes.\n\nBegin by creating a structure for the documentation and outlining the information needed for each platform. Then, tackle the GitHub Pages setup and Helm processes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-ctf-party/issues/79", @@ -2407,6 +2492,7 @@ "node_id": "I_kwDOH7x8X85hW_Qg", "title": "Replace Request with something better maintained", "body": "Given 2 Security alerts regarding the NPM package `Request` we need to change it to something else.\r\nSteps to take: \r\n- [ ] Wait for kubernetes-client to be ready (See below for tracking)\r\n- [ ] migrate to the latest version of the kubernetes-client (this will include giving typed requests/responses to the k8s API)\r\n- [ ] see if we have Request anywhere else and remove it from there (start by uninstalling it ;-))", + "summary": "Replace the `Request` NPM package with a better-maintained alternative due to two security alerts. Follow these steps:\n\n1. **Monitor Kubernetes Client Development**:\n - Track the readiness of the `kubernetes-client` library to ensure compatibility and support.\n\n2. **Migrate to Latest Version**:\n - Update the project to utilize the latest version of the `kubernetes-client`.\n - Implement typed requests and responses for the Kubernetes API to enhance type safety and reduce errors.\n\n3. **Audit and Remove Request Usage**:\n - Search the codebase for any other instances of the `Request` package.\n - Uninstall the package from the project once identified and ensure all calls to it are replaced with the new alternative.\n\nStart by checking the current version of `kubernetes-client` and its release notes for breaking changes or migration guides. 
After that, proceed with the migration and code adjustments.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-ctf-party/issues/212", @@ -2435,6 +2521,7 @@ "node_id": "I_kwDOH7x8X85mb_ev", "title": "Embed vault server in safe container", "body": "We want to open the vault challenges to the ctf by adding a vault dev server to the teamnamespace by means of javascript. This requires:\r\n- [ ] vault container with deployment and service\r\n- [ ] Rbac extension for player to describe and read logs of pods/deployment\r\n- [ ] Nsp extension to make vault, desktop, and WrongSecrets talk to eachbother in same namespace\r\n- [ ] extend balancer to proxy to vault\r\n- [ ] Add initialisation JavaScript to give vault the right policies and contents\r\n- [ ] Extend tests\r\n- [ ] have a PVC for just vault isolated for each NS", + "summary": "Embed a Vault server in a safe container for the CTF. Follow these steps:\n\n1. **Create Vault Container**: Develop a deployment and service configuration for the Vault container within the team namespace. Ensure it adheres to best practices for container security.\n\n2. **Implement RBAC Extension**: Establish Role-Based Access Control (RBAC) permissions for players. This should allow them to describe and read logs of the pods and deployments related to the Vault server.\n\n3. **Develop NSP Extension**: Create a Network Service Proxy (NSP) extension that enables the Vault, desktop, and WrongSecrets applications to communicate within the same namespace effectively.\n\n4. **Extend Load Balancer**: Modify the existing load balancer to include functionality for proxying requests to the Vault server, ensuring it can handle traffic appropriately.\n\n5. **Initialize Vault with JavaScript**: Write initialization JavaScript that configures the Vault with necessary policies and preloads required contents for the CTF environment.\n\n6. **Enhance Testing**: Expand the testing framework to include tests specifically for the Vault server integration, ensuring all components function correctly together.\n\n7. **Set Up Persistent Volume Claim (PVC)**: Create a Persistent Volume Claim dedicated to the Vault, ensuring it is isolated for each namespace to maintain data integrity and security.\n\n**First Steps**:\n- Start by designing the deployment and service for the Vault container.\n- Set up the RBAC permissions structure.\n- Begin drafting the JavaScript initialization script to configure Vault correctly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-ctf-party/issues/250", @@ -2463,6 +2550,7 @@ "node_id": "I_kwDOH7x8X85m7_X3", "title": "Create proxy and websocket configuration configmap for wrongsecrets-balancer", "body": "Given we will have to upgrade the webtop often, we need to be able to easily adjust the requierd proxying in the wrongsecrets-balancer. 
That is why we need a configmap that contains a mapping for:\r\n- which urls should end up to a websocket upgrade to which service\r\n- which urls need to be proxied to which address\r\n\r\nso, something like:\r\n- websocket\r\n - servicename1:8080:\r\n incomingurl1\r\n incomingurl2\r\n incomingurl3\r\n - serviname2:3000\r\n incomingurl4\r\n incomingurl5\r\n - proxy\r\n - servicename1:8080:\r\n incomingurl6\r\n incomingurl7\r\n - servicename2:3000\r\n incomingurl8withwildcard\r\n - servicename3(CTFD)\r\n incomingurl9\r\n incomingurl10\r\n - servicename4(grafana)\r\n incomingurl10\r\n\r\nthe deployment should mount the configmap in so that the balancer its code should be able to read it. both the deployment and balancer code need to be adjusted.\r\nhttps://github.com/OWASP/wrongsecrets-ctf-party/blob/main/wrongsecrets-balancer/src/proxy/proxy.js#L106 should no longer be hardcoded but usse a configmap to load like above", + "summary": "Create a ConfigMap for the wrongsecrets-balancer to facilitate dynamic proxying and WebSocket upgrades. Define a structure in the ConfigMap that maps URLs to services for both WebSocket and proxy configurations.\n\n1. Define the ConfigMap structure:\n - Create entries for WebSocket upgrades:\n - Map incoming URLs to service names and ports.\n - Create entries for proxying:\n - Map incoming URLs to service names and ports, including wildcard support.\n\nExample structure:\n```yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: wrongsecrets-balancer-config\ndata:\n websocket: |\n servicename1:8080:\n - incomingurl1\n - incomingurl2\n - incomingurl3\n servicename2:3000:\n - incomingurl4\n - incomingurl5\n proxy: |\n servicename1:8080:\n - incomingurl6\n - incomingurl7\n servicename2:3000:\n - incomingurl8withwildcard\n servicename3(CTFD):\n - incomingurl9\n - incomingurl10\n servicename4(grafana):\n - incomingurl10\n```\n\n2. Adjust the deployment:\n - Mount the ConfigMap into the deployment so the balancer can access it.\n - Ensure the correct file path is used for the balancer code to read the configuration.\n\n3. Update the balancer code:\n - Refactor the hardcoded logic in `proxy.js` to read from the mounted ConfigMap instead of using hardcoded values.\n\n4. Test the implementation:\n - Verify that the application correctly reads the ConfigMap and routes requests as defined.\n\nBy following these steps, ensure seamless upgrades and modifications to the proxying behavior without changing the codebase each time.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-ctf-party/issues/252", @@ -2491,6 +2579,7 @@ "node_id": "I_kwDOH7x8X85qXfmQ", "title": "Create cypres test for full round trip (webtop &wrongsecrets)", "body": "We need to build cypress tests that check based on a Minikube env whether:\r\n- [ ] creating a team works: you can register a team, and then end up with having 2 buttons, that you can click to see the webtop and the wrongsecrets instance\r\n- [ ] joining a team works: you can join the same team as in the previous test again by giving the passscode and that end up with having 2 buttons, that you can click to see the webtop and the wrongsecrets instance\r\n- [ ] admin interface works", + "summary": "Create Cypress tests for full round trip in Minikube environment. Focus on the following functionalities:\n\n1. **Test Team Creation**:\n - Simulate team registration.\n - Verify that two buttons appear to access the webtop and wrongsecrets instances.\n\n2. 
**Test Team Joining**:\n - Join the previously created team using the passcode.\n - Ensure that the same two buttons for webtop and wrongsecrets instances are visible.\n\n3. **Test Admin Interface**:\n - Validate that the admin interface is functional and accessible.\n\n**First Steps**:\n- Set up a Minikube environment and ensure Cypress is installed.\n- Write test cases using Cypress commands to automate the above scenarios.\n- Use Cypress commands like `cy.visit()` to navigate to the necessary pages and `cy.get()` to interact with buttons.\n- Implement assertions to check for the existence of buttons and validate correct behavior of the admin interface.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-ctf-party/issues/284", @@ -2521,6 +2610,7 @@ "node_id": "I_kwDOH7x8X85z20rW", "title": "Upgrade to latest webtop ", "body": "Given that everything changed in the new KasmVNC setup for the latest Linux [webtop](https://docs.linuxserver.io/images/docker-webtop), we need to upgrade the balancer again.\r\nTasks in this ticket:\r\n- [ ] pin to a very recent version of the webtop\r\n- [ ] update the balancer to be compatible with the webtop", + "summary": "Upgrade to the latest webtop version. \n\n1. Pin the project to the most recent version of the webtop as specified in the [LinuxServer documentation](https://docs.linuxserver.io/images/docker-webtop).\n2. Review the changes in the new KasmVNC setup to understand the differences from the previous version.\n3. Update the load balancer configuration to ensure compatibility with the new webtop features and requirements.\n4. Test the integration of the updated balancer with the webtop to verify functionality.\n5. Document any changes made during the upgrade process for future reference.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-ctf-party/issues/371", @@ -2545,10 +2635,11 @@ "pk": 91, "fields": { "nest_created_at": "2024-09-11T19:22:13.123Z", - "nest_updated_at": "2024-09-11T22:53:47.233Z", + "nest_updated_at": "2024-09-13T17:07:25.631Z", "node_id": "I_kwDOH7x8X86SdOo1", "title": "Upgrade to the latest wrongsecrets (1.9.1)", "body": "- [x] update k8s to 1.30: https://github.com/OWASP/wrongsecrets-ctf-party/pull/666\r\n- [x] update all deps\r\n- [ ] enable the new challenge in javascript (e.g. add sealed-secrets items) and upgrade to 1.9.1 for wrongsecrets image\r\n - [ ] make 1.9.x where challenge 48 is enabled in CTF mode and update vault challenge CTF settings and vault-challenges can be temporary disabled via env-vars.\r\n - [ ] create javascript for ctf-party with new tests\r\n - [ ] release new balancer image\r\n- [ ] fix https://github.com/OWASP/wrongsecrets-ctf-party/issues/665\r\n- [ ] fix https://github.com/OWASP/wrongsecrets-ctf-party/pull/630\r\n- [ ] fix https://github.com/OWASP/wrongsecrets-ctf-party/pull/642\r\n- [ ] update and fix CTFd everywhere", + "summary": "Upgrade wrongsecrets to version 1.9.1. \n\n1. Ensure Kubernetes is updated to version 1.30 as per pull request #666.\n2. Update all dependencies to their latest versions.\n3. Enable the new JavaScript challenge by integrating sealed-secrets items and upgrading to the wrongsecrets image version 1.9.1.\n - Implement challenge 48 in CTF mode in version 1.9.x, modifying vault challenge settings. Allow temporary disabling of vault challenges through environment variables.\n - Develop new JavaScript tests for the CTF party.\n - Release an updated balancer image.\n4. 
Address and resolve the following issues:\n - Fix issue #665.\n - Resolve pull request #630.\n - Address pull request #642.\n5. Update and fix CTFd across the board.\n\nBegin by updating Kubernetes and dependencies, then proceed to implement the new JavaScript challenge and tackle the listed issues sequentially.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-ctf-party/issues/664", @@ -2578,6 +2669,7 @@ "node_id": "I_kwDOH7x8X86SdPL2", "title": "Fix bug in admin interface", "body": "when a namespace is loaded and we want to manage it via the admin env we get various errors in the ui. these need to be fixed.", + "summary": "Fix the bug in the admin interface that occurs when loading a namespace for management in the admin environment. Identify the specific errors displayed in the UI during this process. \n\n1. Review the admin interface code, particularly the components responsible for loading and managing namespaces.\n2. Check for any error messages in the console and correlate them with the user interface issues.\n3. Investigate the data flow and state management related to namespace loading; ensure that the correct data is being passed to the UI components.\n4. Implement error handling to provide clearer feedback in the UI.\n5. Test the changes thoroughly in different scenarios to ensure that loading namespaces does not produce errors.\n\nBegin by isolating the error source and replicating the issue in a development environment.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-ctf-party/issues/665", @@ -2602,10 +2694,11 @@ "pk": 93, "fields": { "nest_created_at": "2024-09-11T19:23:40.415Z", - "nest_updated_at": "2024-09-11T19:23:40.415Z", + "nest_updated_at": "2024-09-13T15:05:15.591Z", "node_id": "I_kwDOHpaoic6BytQC", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description. To address this, a concise and informative description needs to be created that outlines the project's purpose, features, and any relevant details for potential users or contributors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-application-security-playbook/issues/1", @@ -2632,6 +2725,7 @@ "node_id": "I_kwDOHeRoz85gnpT4", "title": "Implement semgrep for all langs", "body": "Create a github action that runs the free version of codeQL and Semgrep for all languages.", + "summary": "Implement Semgrep integration for all supported programming languages. \n\n1. Create a GitHub Action workflow file in the `.github/workflows` directory.\n2. Define triggers for the workflow, such as `push` and `pull_request`.\n3. Set up a job that runs on the latest version of Ubuntu.\n4. Install Semgrep by adding a step to run `pip install semgrep`.\n5. Configure the CodeQL action by adding `actions/setup-codeql@v1` and specifying the languages to analyze.\n6. Add steps to run Semgrep rules for each language, ensuring to include relevant rules for analysis.\n7. Collect the results from both Semgrep and CodeQL and output them in a usable format, such as JSON or SARIF.\n8. 
Test the GitHub Action in a repository with sample code to validate the integration.\n\nBy following these steps, ensure that the action runs efficiently and captures any issues across all languages.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-binaries/issues/17", @@ -2660,6 +2754,7 @@ "node_id": "I_kwDOHeRoz85nH_c5", "title": "Create CTF versions on generation", "body": "Create CTF versions on compile of the files during github action and quickbuild.sh, these versions of the binaries should have:\r\n- spoiling disabled\r\n- randomize the actual secret", + "summary": "Implement CTF versions during the file generation and compilation processes in GitHub Actions and quickbuild.sh. Ensure that these binary versions have the following specifications:\n\n1. Disable spoiling in the compilation settings.\n2. Introduce randomness in the actual secret used within the binaries.\n\n**First Steps:**\n\n1. Modify the GitHub Actions workflow file to include a new job for creating CTF binaries.\n2. Update the quickbuild.sh script to add an option for generating CTF versions during the build process.\n3. Configure compiler flags or options to disable spoiling when compiling the binaries.\n4. Implement a function to randomize the secret used in the binary, ensuring it is unique for each build.\n5. Test the new CTF binaries to verify that spoiling is indeed disabled and that the secrets are randomized as intended.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-binaries/issues/23", @@ -2684,10 +2779,11 @@ "pk": 96, "fields": { "nest_created_at": "2024-09-11T19:24:04.083Z", - "nest_updated_at": "2024-09-11T22:53:40.828Z", + "nest_updated_at": "2024-09-13T17:07:19.825Z", "node_id": "I_kwDOHeRoz85nH_o0", "title": "Add pre-commit", "body": "for this we need to setup precommit that works at least on macos/linux/windows.\r\n- [ ] Add pre-commit configuration (docs, language related stuff?)\r\n- [ ] onboard wrongsecrets-binaries to automated pre-commit corrections for c/c++\r\n- [ ] onboard wrongsecrets-binaries to automated pre-commit corrections for Rust\r\n- [ ] onboard wrongsecrets-binaries to automated pre-commit corrections for Golang", + "summary": "Implement pre-commit hooks for cross-platform compatibility (macOS, Linux, Windows). \n\n1. Set up a pre-commit configuration file (e.g., `.pre-commit-config.yaml`) in the repository to define hooks and their environments.\n2. Include documentation and relevant language specifications that detail how to use and customize pre-commit hooks.\n3. 
Integrate automated pre-commit corrections for `wrongsecrets-binaries` in the following languages:\n - C/C++\n - Research existing pre-commit hooks for C/C++.\n - Configure hooks to automatically correct `wrongsecrets-binaries`.\n - Rust\n - Identify Rust-specific pre-commit hooks.\n - Set up necessary hooks for Rust to handle `wrongsecrets-binaries`.\n - Golang\n - Explore Golang pre-commit options.\n - Ensure hooks for Golang also address `wrongsecrets-binaries`.\n\nBegin by creating the `.pre-commit-config.yaml` file and testing it with basic hooks for one language, then expand to others.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-binaries/issues/24", @@ -2712,10 +2808,11 @@ "pk": 97, "fields": { "nest_created_at": "2024-09-11T19:24:05.309Z", - "nest_updated_at": "2024-09-11T22:53:42.074Z", + "nest_updated_at": "2024-09-13T17:07:20.900Z", "node_id": "I_kwDOHeRoz85sFR3D", "title": "Add notary for MacOS", "body": "We need to sign the macos binaries:\r\nhttps://github.com/marketplace/actions/code-sign-action ", + "summary": "Implement code signing for MacOS binaries. Utilize the GitHub Action available at [code-sign-action](https://github.com/marketplace/actions/code-sign-action) to automate the signing process.\n\n**First Steps:**\n1. Integrate the code-sign-action into your GitHub workflow file.\n2. Generate or obtain a valid MacOS Developer ID certificate.\n3. Store the certificate securely in GitHub Secrets for access during the build process.\n4. Update the workflow configuration to include the necessary parameters for the code-sign-action, such as the certificate and entitlements file.\n5. Test the workflow to ensure binaries are signed correctly and verify the signature after the build.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets-binaries/issues/36", @@ -2738,10 +2835,11 @@ "pk": 98, "fields": { "nest_created_at": "2024-09-11T19:25:01.635Z", - "nest_updated_at": "2024-09-11T19:25:01.635Z", + "nest_updated_at": "2024-09-13T15:06:18.772Z", "node_id": "I_kwDOHGUcT855-cLX", "title": "Unnecessary use of LIST permission example of attack does not work", "body": "I tired to replicate the Unnecessary use of LIST permission example attack but it does not work. I think the problem is that in the K8s version before 1.24, every time we would create a service account, a non-expiring secret token (Mountable secrets & Tokens) was created by default. However, from version 1.24 onwards, it was disbanded and no secret token is created by default when we create a service account.\r\n\r\nWhen i tried to access to http://127.0.0.1:8001/api/v1/namespaces/default/secrets/abcd link i can see the \"secretAuthToken\": \"dmVyeVNlY3VyZTEyMw==\"\r\n", + "summary": "The issue discusses the failure to replicate a specific attack example related to the unnecessary use of LIST permissions. The author notes that the attack's context relies on the presence of a non-expiring secret token associated with service accounts, which was standard practice in Kubernetes versions prior to 1.24. However, this behavior has changed in version 1.24, where no secret token is created by default for new service accounts. 
The author successfully accessed a secret token at a given URL, indicating that while the attack example may not function as intended in newer versions, the existence of the secret token can still be observed.\n\nTo address this issue, it may be necessary to update the attack example to reflect the changes in Kubernetes version 1.24 and clarify the conditions under which it can be replicated.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-kubernetes-top-ten/issues/53", @@ -2764,10 +2862,11 @@ "pk": 99, "fields": { "nest_created_at": "2024-09-11T19:25:06.040Z", - "nest_updated_at": "2024-09-11T19:25:06.040Z", + "nest_updated_at": "2024-09-13T15:06:22.019Z", "node_id": "I_kwDOHGSr786BytOw", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description. It suggests that a clear and concise explanation of the project's purpose and functionality would be beneficial. To address this, a brief overview outlining the project's goals, features, and usage should be developed and included in the project documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-toctourex/issues/1", @@ -2790,10 +2889,11 @@ "pk": 100, "fields": { "nest_created_at": "2024-09-11T19:25:13.295Z", - "nest_updated_at": "2024-09-11T19:25:13.295Z", + "nest_updated_at": "2024-09-13T15:06:27.918Z", "node_id": "I_kwDOHGPq186BytNV", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to improve clarity and provide context for users. To address this, a concise and informative description should be drafted and included in the project documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-supplychaingoat/issues/1", @@ -2820,6 +2920,7 @@ "node_id": "I_kwDOG95VyM5Fl4uZ", "title": "Add Initial Wavenet Logic for generating Text To Speech.", "body": "Add a folder TTS, in the repo, which will have the logic to convert Text to Speech, add text file for testing as well. \r\n\r\nYou can either use:\r\n1. Wavenets for Text to Speech Conversion. \r\n2. 
Google or any Open Source API ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SKF-VideoEditorAPI/issues/3", @@ -2842,10 +2943,11 @@ "pk": 102, "fields": { "nest_created_at": "2024-09-11T19:25:31.779Z", - "nest_updated_at": "2024-09-11T19:25:31.779Z", + "nest_updated_at": "2024-09-13T15:06:42.169Z", "node_id": "I_kwDOG95VyM5Gn28w", "title": "ADD CI/CD for the package ", "body": "We need CI/CD pipeline for the package for following proper SDLC", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SKF-VideoEditorAPI/issues/14", @@ -2874,6 +2976,7 @@ "node_id": "I_kwDOG95VyM5Gn3N2", "title": "Update README with all info about the package and how we can run it", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SKF-VideoEditorAPI/issues/15", @@ -2900,6 +3003,7 @@ "node_id": "I_kwDOG95VyM5HIdxV", "title": "ADD Unit Test for the existing APIs", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SKF-VideoEditorAPI/issues/20", @@ -2928,6 +3032,7 @@ "node_id": "I_kwDOG95VyM5H45vv", "title": "Adding OTP Verification API", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SKF-VideoEditorAPI/issues/24", @@ -2954,6 +3059,7 @@ "node_id": "I_kwDOG95PUM5Fl5Ks", "title": "Create UI In Figma and ADD design.md file", "body": "Figma Design for the APP will be highly appreciate, please ADD design.md consisting of every component url and screenshot in the repo as well.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SKF-VideoEditorUI/issues/4", @@ -2978,10 +3084,11 @@ "pk": 107, "fields": { "nest_created_at": "2024-09-11T19:25:41.059Z", - "nest_updated_at": "2024-09-11T19:25:41.059Z", + "nest_updated_at": "2024-09-13T15:06:46.179Z", "node_id": "I_kwDOG95PUM5H45x_", "title": "Adding OTP Verification UI", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SKF-VideoEditorUI/issues/8", @@ -3004,10 +3111,11 @@ "pk": 108, "fields": { "nest_created_at": "2024-09-11T19:25:58.098Z", - "nest_updated_at": "2024-09-11T19:25:58.098Z", + "nest_updated_at": "2024-09-13T15:06:59.645Z", "node_id": "I_kwDOG2VVl86BytMG", "title": "Please add a description to this project", "body": "", + "summary": "A request has been made to include a project description. It would be beneficial to provide an overview that outlines the project's purpose, features, and how to get started.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-chaingoat/issues/1", @@ -3030,10 +3138,11 @@ "pk": 109, "fields": { "nest_created_at": "2024-09-11T19:26:10.299Z", - "nest_updated_at": "2024-09-11T19:26:10.299Z", + "nest_updated_at": "2024-09-13T15:07:08.955Z", "node_id": "I_kwDOGwIlcs6TvxGu", "title": "Dependency Dashboard", "body": "This issue lists Renovate updates and detected dependencies. Read the [Dependency Dashboard](https://docs.renovatebot.com/key-concepts/dashboard/) docs to learn more.\n\nThis repository currently has no open or pending branches.\n\n## Detected dependencies\n\n
regex\n\ninfo.md\n\n - `corazawaf/coraza 3.2.1`
\n\n---\n\n- [ ] Check this box to trigger a request for Renovate to run again on this repository\n\n", + "summary": "Implement a Dependency Dashboard for the repository. Utilize Renovate to monitor and manage updates for dependencies. Refer to the [Dependency Dashboard documentation](https://docs.renovatebot.com/key-concepts/dashboard/) for guidance.\n\n1. Note that the repository currently has no open or pending branches.\n2. Identify the detected dependency: `corazawaf/coraza` version `3.2.1`.\n3. To trigger Renovate to run again, check the provided box. \n\nBegin by setting up Renovate if it isn't already configured, and ensure it has access to the repository's dependencies. After that, actively monitor the dashboard for updates and manage them accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-coraza-web-application-firewall/issues/9", @@ -3056,10 +3165,11 @@ "pk": 110, "fields": { "nest_created_at": "2024-09-11T19:26:14.303Z", - "nest_updated_at": "2024-09-11T19:26:14.303Z", + "nest_updated_at": "2024-09-13T15:07:12.280Z", "node_id": "I_kwDOGwIRCM5Rx9gw", "title": "Supply Chain Management section doesn't focus on the \"management\"", "body": "As a small improvement, I believe the supply chain management section should look into the \"management\" of the supply chain, more than the continuous scanning that is mentioned in the implementation phase. This section _should_ focus more around the inventory of applications and services that are deployed/being-used/etc (*BOMs are the direct example here) and the ability to go through them to understand what you have, and what you can filter whenever a need comes up (you need to search for a CVE, you need to understand how many apps use a specific library, etc.)", + "summary": "The GitHub issue suggests that the Supply Chain Management section lacks emphasis on the \"management\" aspect of supply chains. It recommends that this section should concentrate on inventorying applications and services, such as BOMs, to enhance understanding of deployed resources. The focus should include the ability to filter and search for specific needs, such as identifying CVEs or tracking library usage across applications. Consider revising the section to address these points for improved clarity and functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-application-security-awareness-campaigns/issues/1", @@ -3082,10 +3192,11 @@ "pk": 111, "fields": { "nest_created_at": "2024-09-11T19:26:40.955Z", - "nest_updated_at": "2024-09-11T19:26:40.955Z", + "nest_updated_at": "2024-09-13T15:07:33.453Z", "node_id": "I_kwDOGdcO-85AVcdr", "title": "Layout issue", "body": "The page build failed for the `main` branch with the following error:\r\n\r\nA file was included in `/_layouts/col-sidebar.html` that is a symlink or does not exist in your `_includes` directory. \r\n\r\n\r\nThe page build failed for the `main` branch with the following error:\r\n\r\nThe tag `https` on line 52 in `index.md` is not a recognized Liquid tag", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-kanchipuram/issues/1", @@ -3112,6 +3223,7 @@ "node_id": "I_kwDOGDmZaM5HOG_e", "title": "Build an Operation that will allow running arbitrary commands", "body": "Sometimes it's useful to run an arbitrary command or external script when receiving a response.", + "summary": "Implement an Operation to execute arbitrary commands or external scripts upon receiving a response. 
\n\n1. Define the operation interface to accept command strings and necessary parameters. \n2. Utilize a command execution library compatible with the target environment (e.g., `subprocess` in Python).\n3. Ensure proper handling of input and output streams to capture command results or errors.\n4. Implement error handling for command execution failure, including logging and response handling.\n5. Create a configuration option to enable or disable this feature for security considerations.\n6. Write unit tests to validate command execution with various inputs and ensure safe handling of commands.\n\nBegin by setting up the project structure and identifying the language/framework used. Then, create a prototype of the command execution function using the chosen execution library.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/21", @@ -3143,6 +3255,7 @@ "node_id": "I_kwDOGDmZaM5HOHLM", "title": "Add a JWT parser", "body": "JWT is used in many places already, Raider needs a way to inspect a JWT object", + "summary": "Implement a JWT parser for Raider to enable inspection of JWT objects. \n\n1. Research existing JWT libraries available for the programming language used in Raider. Consider libraries like `jsonwebtoken` for JavaScript or `pyjwt` for Python.\n \n2. Create a new module or class specifically for JWT parsing. Define functions to decode and verify JWT tokens, ensuring to handle different signing algorithms (e.g., HS256, RS256).\n\n3. Implement error handling for invalid tokens and provide meaningful error messages.\n\n4. Write unit tests for the JWT parser to ensure its reliability and correctness. Test various cases, including valid tokens, expired tokens, and malformed tokens.\n\n5. Update the Raider documentation to include usage examples and integration instructions for the new JWT parser.\n\n6. Open a pull request to merge the JWT parser into the main branch, allowing for code review and further enhancements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/22", @@ -3174,6 +3287,7 @@ "node_id": "I_kwDOGDmZaM5HOIm5", "title": "Write a tutorial with a demo on access control", "body": "Write a technical blog post detailing instructions on how to set up and run access control automatically using Raider.", + "summary": "Create a comprehensive tutorial that demonstrates access control using Raider. Include step-by-step instructions to set up and run the access control system automatically. \n\n1. **Define Access Control**: Start the tutorial by explaining what access control is and its importance in securing applications.\n\n2. **Set Up Raider**: \n - Install Raider on your system by cloning the repository from GitHub.\n - Navigate to the Raider directory and run the installation commands specific to your environment.\n\n3. **Configure Access Control**:\n - Describe how to configure access control settings in Raider. Include examples of JSON or YAML configuration files that specify user roles and permissions.\n\n4. **Implement Access Control Logic**: \n - Write code snippets showing how to integrate access control checks within your application. Use Raider's API to manage user permissions effectively.\n\n5. **Test the Implementation**: \n - Provide instructions for running tests to ensure that the access control mechanism works as intended. Use unit tests to validate role-based access.\n\n6. **Demo Application**: \n - Create a simple demo application that showcases the access control features. 
Walk through the steps needed to run the application and test different user roles.\n\n7. **Troubleshooting**: \n - Include a section on common issues and solutions when setting up access control with Raider.\n\n8. **Conclusion**: \n - Summarize the benefits of implementing access control and encourage users to explore further customizations with Raider.\n\nBy following these steps, ensure the tutorial is detailed, clear, and provides a hands-on experience for the reader.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/24", @@ -3205,6 +3319,7 @@ "node_id": "I_kwDOGDmZaM5HOI5Q", "title": "Update documentation for all operations", "body": "Progress:\r\n- [ ] NextStage\r\n- [ ] Print\r\n- [ ] Save\r\n- [ ] Error\r\n- [ ] Http\r\n- [ ] Grep\r\n- [ ] Writing custom operations", + "summary": "Update documentation for all operations listed below:\n\n1. **NextStage**: Specify usage scenarios, parameters, and examples. Include error handling procedures.\n2. **Print**: Detail the output formatting options and potential use cases. Provide code snippets for clarity.\n3. **Save**: Explain the different save options and formats available, along with examples of how to implement them.\n4. **Error**: Document error types, common error messages, and troubleshooting tips. Create a section for best practices.\n5. **Http**: Outline the HTTP methods supported, authentication mechanisms, and error handling. Include examples of API calls.\n6. **Grep**: Clarify the syntax, options available, and typical use cases. Provide sample commands for various scenarios.\n7. **Writing custom operations**: Provide guidelines on how to create custom operations, including structure, naming conventions, and integration with existing operations.\n\n**First Steps**:\n- Review current documentation for each operation.\n- Gather examples and use cases from existing code and user feedback.\n- Define a consistent format for documentation updates.\n- Assign team members to specific operations for faster progress.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/26", @@ -3236,6 +3351,7 @@ "node_id": "I_kwDOGDmZaM5HOI89", "title": "Update documentation for all Plugins", "body": "Progress:\r\n- [X] Common\r\n - [X] Plugin\r\n - [X] Parser\r\n - [X] Processor\r\n - [X] Empty\r\n- [X] Basic\r\n - [X] Variable\r\n - [X] Prompt\r\n - [X] Cookie\r\n - [X] Header\r\n - [X] File\r\n - [X] Command\r\n - [X] Regex\r\n - [X] Html\r\n - [ ] Json\r\n- [ ] Modifiers\r\n - [ ] Alter\r\n - [ ] Combine\r\n- [ ] Parsers\r\n - [ ] Urlparser\r\n- [ ] Processors\r\n - [ ] Urlencode\r\n - [ ] Urldecode\r\n - [ ] B64encode\r\n - [ ] B64decode\r\n- [ ] Writing custom plugins", + "summary": "Update the documentation for all plugins. Ensure that the following sections are completed:\n\n1. **Common**\n - Confirm the following are documented: \n - Plugin\n - Parser\n - Processor\n - Empty\n\n2. **Basic**\n - Document the following plugins:\n - Variable\n - Prompt\n - Cookie\n - Header\n - File\n - Command\n - Regex\n - Html\n - Json (incomplete)\n\n3. **Modifiers**\n - Complete documentation for:\n - Alter\n - Combine\n\n4. **Parsers**\n - Document the Urlparser.\n\n5. **Processors**\n - Document the following processors:\n - Urlencode\n - Urldecode\n - B64encode\n - B64decode\n\n6. 
**Writing Custom Plugins**\n - Provide comprehensive guidelines for writing custom plugins.\n\n**First Steps:**\n- Review existing documentation for the completed sections to ensure accuracy.\n- Identify the key features and functionalities of the plugins that require documentation.\n- Draft documentation for the incomplete entries, focusing on clear examples and usage scenarios.\n- Collaborate with other developers to gather insights and feedback on the custom plugins section.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/27", @@ -3267,6 +3383,7 @@ "node_id": "I_kwDOGDmZaM5HOLcN", "title": "Build a simple mechanism to run bruteforce attacks", "body": "", + "summary": "Implement a simple bruteforce attack mechanism. \n\n1. Define the target system and the authentication method (e.g., username/password, API tokens).\n2. Create a list of potential usernames and passwords. Use dictionaries or generate combinations programmatically.\n3. Choose a programming language suitable for network requests (e.g., Python, JavaScript).\n4. Utilize libraries for handling HTTP requests (e.g., `requests` in Python).\n5. Set up a loop to iterate through the usernames and passwords, sending requests to the target system.\n6. Implement error handling to capture failed attempts and check responses for successful authentication.\n7. Consider adding delays between requests to avoid detection and rate limiting.\n\nBegin by testing against a controlled environment to ensure compliance and avoid legal issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/29", @@ -3284,9 +3401,9 @@ 59 ], "labels": [ - 35, 40, - 41 + 41, + 35 ] } }, @@ -3299,6 +3416,7 @@ "node_id": "I_kwDOGDmZaM5HOLpE", "title": "Fuzz more than one input at a time", "body": "", + "summary": "Implement a feature to allow fuzzing of multiple inputs concurrently. Focus on modifying the fuzzing engine to handle multiple inputs in parallel without conflicts. \n\n1. Analyze the current architecture to understand how inputs are processed sequentially.\n2. Identify potential race conditions or shared resource conflicts when inputs are fuzzed simultaneously.\n3. Refactor the input handling mechanism to support concurrent processing, possibly using threads or asynchronous I/O.\n4. Update the input generation logic to create and manage multiple input streams.\n5. Ensure proper logging and error handling for each input to track the results effectively.\n6. Test the implementation with various input combinations to validate stability and performance.\n\nConsider reviewing existing fuzzing frameworks for inspiration and best practices in concurrent input handling.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/30", @@ -3316,10 +3434,10 @@ 59 ], "labels": [ - 35, 40, 41, - 42 + 42, + 35 ] } }, @@ -3332,6 +3450,7 @@ "node_id": "I_kwDOGDmZaM5HOMDA", "title": "Add shortcuts for common fuzzing payloads", "body": "Add shortcuts to generate random integers, characters, etc... for fuzzing purposes", + "summary": "Implement shortcuts for generating common fuzzing payloads, such as random integers and characters. \n\n1. Define a list of commonly used payload types for fuzzing, including:\n - Random integers (specify range)\n - Random characters (specify character set)\n - Random strings (specify length and character set)\n - Special characters or control sequences\n\n2. Create a utility function or class to generate these payloads efficiently. 
Ensure it handles edge cases and provides options for customization.\n\n3. Integrate the shortcuts into the existing fuzzing framework, allowing users to easily access these functionalities.\n\n4. Update the documentation to include usage examples and instructions on how to utilize the new shortcuts for fuzzing.\n\n5. Consider implementing a configuration file where users can define their preferences for payload generation.\n\nStart by reviewing the current codebase for existing payload generation methods, and identify areas where shortcuts can be implemented or improved.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/31", @@ -3349,9 +3468,9 @@ 59 ], "labels": [ - 35, 40, - 41 + 41, + 35 ] } }, @@ -3364,6 +3483,7 @@ "node_id": "I_kwDOGDmZaM5HOMRw", "title": "Make fuzzing faster with threads", "body": "Currently fuzzing runs slow because it's using a single thread.", + "summary": "Enhance fuzzing performance by implementing multithreading. Analyze the current single-threaded fuzzing process to identify bottlenecks. \n\n1. Investigate the existing codebase to understand how the fuzzing loop operates. \n2. Refactor the code to divide the fuzzing workload across multiple threads.\n3. Utilize a thread pool to manage the threads efficiently and avoid frequent creation and destruction of thread instances.\n4. Ensure proper synchronization mechanisms are in place to handle shared resources and prevent race conditions.\n5. Test the modified fuzzing process to confirm that multithreading improves performance without sacrificing accuracy. \n\nBegin by setting up a development environment where you can safely test changes to the fuzzing implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/32", @@ -3381,9 +3501,9 @@ 59 ], "labels": [ - 35, 40, - 41 + 41, + 35 ] } }, @@ -3396,6 +3516,7 @@ "node_id": "I_kwDOGDmZaM5HOM_f", "title": "Document fuzzing", "body": "", + "summary": "Implement comprehensive fuzzing documentation. \n\n1. Define fuzzing: Explain what fuzzing is and its importance in software testing.\n2. Outline fuzzing techniques: Describe different types of fuzzing, such as mutation-based, generation-based, and protocol-based fuzzing.\n3. Detail setup instructions: Provide step-by-step guidelines for setting up a fuzzing environment, including necessary tools and dependencies.\n4. Include usage examples: Offer practical examples of how to run fuzzers on specific applications or codebases.\n5. Highlight best practices: List best practices for effective fuzzing, such as targeting specific inputs, monitoring application behavior, and interpreting results.\n\nFirst steps:\n- Research existing fuzzing frameworks and tools like AFL, LibFuzzer, or Honggfuzz.\n- Create a basic tutorial for setting up one of these tools in a sample project.\n- Collect common pitfalls and troubleshooting tips from community discussions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/33", @@ -3413,9 +3534,9 @@ 59 ], "labels": [ - 38, 40, - 41 + 41, + 38 ] } }, @@ -3428,6 +3549,7 @@ "node_id": "I_kwDOGDmZaM5Th9LM", "title": "Document command line interface", "body": "Add documentation for the command line interface", + "summary": "Document the command line interface (CLI). Include detailed instructions on usage, commands, options, and examples. Start by identifying all the available commands and their functionalities. \n\n1. List all CLI commands and describe their purpose.\n2. 
For each command, specify required and optional arguments.\n3. Provide usage examples for clarity, demonstrating common scenarios.\n4. Include error handling information and troubleshooting tips.\n5. Organize the documentation into sections for easy navigation.\n\nBegin by gathering existing code comments and any informal documentation. Review the CLI codebase to ensure accuracy and completeness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/76", @@ -3445,8 +3567,8 @@ 59 ], "labels": [ - 38, - 43 + 43, + 38 ] } }, @@ -3459,6 +3581,7 @@ "node_id": "I_kwDOGDmZaM5Th-8Z", "title": "Add example showing how to write custom Operations/Plugins", "body": "", + "summary": "Create an example demonstrating the process of writing custom Operations/Plugins. \n\n1. Define the purpose of the custom operation/plugin.\n2. Set up the development environment, ensuring all dependencies are installed.\n3. Create a new file for the custom operation/plugin in the appropriate directory.\n4. Implement the core functionality, adhering to the existing code structure and conventions.\n5. Write unit tests to validate the custom operation/plugin.\n6. Document the code, explaining how to use the custom operation/plugin effectively.\n7. Test the implementation in a sample project to ensure compatibility and performance.\n8. Submit a pull request with the new example, including a clear description of the changes made. \n\nFocus on providing clear, concise code snippets and explanations in the example for better understanding.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/77", @@ -3476,9 +3599,9 @@ 59 ], "labels": [ - 38, - 37, 36, + 37, + 38, 44, 45 ] @@ -3493,6 +3616,7 @@ "node_id": "I_kwDOGDmZaM5TiFlr", "title": "Add attack examples to documentation", "body": "- [ ] Fuzzing username/password\r\n - [ ] Bruteforce\r\n - [ ] User enumeration\r\n - [ ] User registration\r\n - [ ] Password reset\r\n- [ ] Multi-factor authentication\r\n - [ ] Bruteforcing MFA code\r\n - [ ] Bypassing MFA checks\r\n- [ ] Session management\r\n - [ ] Session fixation\r\n - [ ] Session hijacking\r\n - [ ] Improper session termination\r\n- [ ] Authentication bypass\r\n - [ ] Replay attack\r\n - [ ] Missing critical authentication step\r\n - [ ] Using alternate step to bypass security restrictions\r\n - [ ] Missing authentication for critical functions\r\n- [ ] OAuth2.0\r\n - [ ] bruteforcing client_id/client_secret\r\n - [ ] redirect_uri vulnerabilities\r\n - [ ] abusing OAuth grant types\r\n - [ ] PKCE downgrade\r\n - [ ] bruteforce available scopes\r\n - [ ] automate username/password bruteforce attacks on OAuth\r\n - [ ] CSRF attacks\r\n - [ ] leaking authorization code\r\n - [ ] leaking access/refresh tokens\r\n - [ ] improper token invalidation ", + "summary": "Add attack examples to the documentation. Include specific details for each category:\n\n1. **Fuzzing Username/Password:**\n - Document approaches for:\n - Bruteforce attacks\n - User enumeration tactics\n - Exploiting user registration processes\n - Attacking password reset functionalities\n\n2. **Multi-Factor Authentication (MFA):**\n - Provide examples for:\n - Bruteforcing MFA codes\n - Bypassing MFA checks through various means\n\n3. **Session Management:**\n - Illustrate vulnerabilities related to:\n - Session fixation attacks\n - Session hijacking methodologies\n - Improper session termination scenarios\n\n4. 
**Authentication Bypass:**\n - Detail techniques such as:\n - Conducting replay attacks\n - Identifying missing critical authentication steps\n - Utilizing alternate steps to bypass security restrictions\n - Recognizing functions lacking authentication\n\n5. **OAuth 2.0 Vulnerabilities:**\n - Explain risks associated with:\n - Bruteforcing client_id and client_secret\n - Redirect_uri vulnerabilities\n - Abusing various OAuth grant types\n - PKCE downgrade attacks\n - Bruteforcing available scopes\n - Automating username/password bruteforce attacks on OAuth\n - CSRF attack scenarios\n - Leaking authorization codes\n - Leaking access and refresh tokens\n - Improper token invalidation issues\n\n**First Steps:**\n- Gather real-world attack scenarios and examples for each category.\n- Collaborate with security experts to validate the examples.\n- Prepare detailed write-ups for each attack, including potential mitigations.\n- Update the documentation repository with the new content and ensure it is easily accessible.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/78", @@ -3510,9 +3634,9 @@ 59 ], "labels": [ - 38, 40, - 45 + 45, + 38 ] } }, @@ -3525,6 +3649,7 @@ "node_id": "I_kwDOGDmZaM5Tj54i", "title": "Write tutorials with sample authentications on authenticationtest.com", "body": "", + "summary": "Create tutorials for authentication methods used on authenticationtest.com. \n\n1. Identify key authentication methods to cover, such as OAuth, JWT, and Basic Authentication.\n2. Develop sample code snippets for each method, ensuring clarity and simplicity.\n3. Include step-by-step instructions on how to implement each authentication method.\n4. Provide explanations of security considerations and best practices associated with each method.\n5. Develop a section for troubleshooting common issues that users may encounter during implementation.\n6. Format the tutorials for easy readability and accessibility on the website.\n\nBegin by gathering documentation from authenticationtest.com to ensure accuracy. Then, outline the structure of each tutorial before diving into coding and writing.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/80", @@ -3542,9 +3667,9 @@ 60 ], "labels": [ + 46, 38, - 39, - 46 + 39 ] } }, @@ -3557,6 +3682,7 @@ "node_id": "I_kwDOGDmZaM5Tj59e", "title": "Write a tutorial on testing OAuth", "body": "Using Keycloak should work", + "summary": "Create a tutorial on testing OAuth with Keycloak. Begin by outlining the prerequisites: ensure you have Keycloak installed and running, and access to a web application or API that requires OAuth authentication.\n\n1. **Set Up Keycloak**:\n - Download and install Keycloak.\n - Start the Keycloak server.\n - Access the Keycloak admin console (typically at `http://localhost:8080/auth`).\n\n2. **Configure a Realm**:\n - Create a new realm in Keycloak.\n - Define the realm settings as needed for your application.\n\n3. **Create a Client**:\n - In the created realm, navigate to \"Clients\" and create a new client.\n - Set the client ID and configure the client settings (e.g., access type as `confidential` or `public`).\n - Note the client secret if using a confidential client.\n\n4. **Set Up Roles and Users**:\n - Define roles within the realm to control access.\n - Create users and assign them the necessary roles.\n\n5. 
**Test OAuth Flow**:\n - Use Postman or a similar tool to initiate the OAuth flow.\n - Request an authorization code by navigating to the authorization endpoint.\n - Exchange the authorization code for an access token by calling the token endpoint.\n\n6. **Validate the Access Token**:\n - Decode the access token using a JWT library.\n - Verify claims such as expiration and audience.\n\n7. **Test API Access**:\n - Use the obtained access token to make requests to your API.\n - Ensure that the API correctly validates the token and responds based on the user's roles.\n\n8. **Automate the Testing**:\n - Write scripts using testing frameworks (e.g., Jest, Mocha) to automate the OAuth flow and API access tests.\n - Include tests for valid and invalid tokens.\n\n9. **Document the Process**:\n - Clearly document each step, providing code snippets and configuration details.\n - Include troubleshooting tips for common issues.\n\nBy following these steps, you will create a comprehensive tutorial for testing OAuth using Keycloak.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/81", @@ -3574,9 +3700,9 @@ 59 ], "labels": [ + 47, 38, - 39, - 47 + 39 ] } }, @@ -3589,6 +3715,7 @@ "node_id": "I_kwDOGDmZaM5Tj6Op", "title": "Write tutorials on how to exploit various JuiceShop issues", "body": "Use [OWASP Juiceshop](https://owasp.org/www-project-juice-shop/) to demo some ways to exploit issues using raider", + "summary": "Create tutorials demonstrating the exploitation of various OWASP Juice Shop vulnerabilities using Raider. \n\n1. **Set Up Environment**:\n - Clone the OWASP Juice Shop repository.\n - Install Raider by following the documentation on its GitHub page.\n\n2. **Identify Vulnerabilities**:\n - Review the Juice Shop documentation to understand its known vulnerabilities.\n - Use tools like OWASP ZAP or Burp Suite to scan the Juice Shop instance for security issues.\n\n3. **Develop Tutorials**:\n - For each identified vulnerability, write a detailed tutorial that includes:\n - **Description**: Explain the vulnerability and its impact.\n - **Demonstration**: Show how to exploit the vulnerability using Raider.\n - **Mitigation**: Provide suggestions on how to fix the issue in a real application.\n\n4. **Organize Content**:\n - Structure the tutorials in a clear format, using headings, code snippets, and screenshots where applicable.\n\n5. **Publish and Share**:\n - Create a dedicated branch in the GitHub repository for the tutorials.\n - Use Markdown for formatting and ensure the content is accessible and easy to understand.\n\nBy following these steps, effectively educate users on exploiting and securing applications against vulnerabilities through practical demonstrations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/83", @@ -3620,6 +3747,7 @@ "node_id": "I_kwDOGDmZaM5Tj7Vk", "title": "Build a complete CLI", "body": "Current command line interface is still lacking in features. 
Following commands are planned:\r\n\r\n- [ ] show\r\n - [x] projects\r\n - [x] hyfiles\r\n - [x] graphs\r\n - [x] flows\r\n - [ ] plugins\r\n - [ ] inputs\r\n - [ ] outputs\r\n - [ ] operations\r\n- [x] config\r\n - [x] proxy\r\n - [x] verify\r\n - [x] loglevel\r\n - [x] user agent\r\n - [x] active project\r\n- [ ] new\r\n - [x] project\r\n - [x] hyfile\r\n - [ ] user\r\n - [ ] copy project\r\n- [x] delete\r\n - [x] project\r\n - [x] hyfiles\r\n- [ ] edit\r\n - [ ] rename project\r\n - [ ] rename hyfile\r\n - [x] edit hyfile by filename\r\n - [ ] edit hyfile by Flow/FlowGraph name\r\n- [ ] inspect\r\n - [ ] inspect HTTP requests/responses\r\n - [ ] show inputs/outputs\r\n- [ ] run\r\n - [X] single Flow\r\n - [X] FlowGraph\r\n - [X] Chain Flows/FlowGraphs\r\n - [ ] choose a User\r\n- [ ] fuzz\r\n - [ ] fuzz single flow\r\n - [ ] fuzz plugin in a flowgraph\r\n- [ ] shell\r\n - [ ] run ipython to debug configuration\r\n - [ ] run hy to debug configuration\r\n", + "summary": "Enhance the command line interface (CLI) by implementing the following planned commands and their associated features. \n\n1. **Show Command**: \n - Implement `show plugins`, `show inputs`, `show outputs`, and `show operations` functionalities to complete this command.\n \n2. **New Command**: \n - Finalize `new user` and `new copy project` features, which are currently incomplete.\n\n3. **Edit Command**:\n - Develop `rename project`, `rename hyfile`, and `edit hyfile by Flow/FlowGraph name` functionalities to extend this command.\n\n4. **Inspect Command**:\n - Introduce `inspect HTTP requests/responses` and `show inputs/outputs` features to enhance the inspection capabilities of the CLI.\n\n5. **Run Command**:\n - Add the ability to `choose a User` for running flows, which is currently missing.\n\n6. **Fuzz Command**:\n - Create functionalities for `fuzz single flow` and `fuzz plugin in a flowgraph` to allow fuzz testing options.\n\n7. 
**Shell Command**:\n - Implement options to `run ipython` and `run hy` for debugging configuration issues.\n\n**First Steps**:\n- Begin by prioritizing the implementation of the `show`, `new`, and `edit` commands since they have multiple incomplete functionalities.\n- Set up a development environment and create feature branches for each command.\n- Write unit tests for each new feature to ensure reliability.\n- Review existing codebase for any reusable components that can be utilized in the new implementations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/85", @@ -3637,8 +3765,8 @@ 59 ], "labels": [ - 35, - 43 + 43, + 35 ] } }, @@ -3651,6 +3779,7 @@ "node_id": "I_kwDOGDmZaM5Tj737", "title": "Build a way to debug hyfiles", "body": "- [ ] print hylang code of flows in logs\r\n- [ ] run an interactive hy shell\r\n- [ ] run an interactive python shell\r\n- [ ] run scripts from CLI\r\n- [ ] interact with plugins\r\n - [ ] breakpoint on plugin used as input\r\n - [ ] show current plugin value\r\n - [ ] send the request with new value\r\n - [ ] receive the response \r\n - [ ] runs operations\r\n - [ ] continue next flow in the graph\r\n - [ ] set up NextStage\r\n- [ ] interact with flows\r\n - [ ] breakpoint before a flow's request is sent\r\n - [ ] or run just a flow ignoring previous flows in the graph\r\n - [ ] show current request inputs (including missing ones)\r\n - [ ] asks the user to change any value\r\n - [ ] supply missing values\r\n - [ ] sends new request\r\n - [ ] receives the response\r\n - [ ] run operations\r\n - [ ] continue to next flow\r\n - [ ] or manually set the NextStage \r\n", + "summary": "Implement a debugging system for hyfiles by following these steps:\n\n1. **Log Hylang Code**: Ensure that the Hylang code of flows is printed in the logs for easy reference.\n \n2. **Interactive Shells**: \n - Integrate an interactive Hy shell for executing Hylang commands.\n - Integrate an interactive Python shell for executing Python commands.\n\n3. **CLI Script Execution**: Enable the ability to run scripts directly from the command-line interface (CLI).\n\n4. **Plugin Interaction**: \n - Set breakpoints on plugins used as input to monitor their execution.\n - Display the current value of the plugin.\n - Implement functionality to send requests with modified values.\n - Handle the response received from the plugin.\n - Allow for various operations to be executed post-response.\n - Enable continuation to the next flow in the graph or set up the NextStage.\n\n5. **Flow Interaction**: \n - Introduce breakpoints before a flow's request is dispatched.\n - Allow execution of a single flow while bypassing prior flows in the graph.\n - Display current request inputs, highlighting any missing values.\n - Prompt the user to modify values as needed.\n - Supply any missing values and resend the request.\n - Capture and process the response, allowing for subsequent operations.\n - Continue to the next flow or manually set the NextStage as necessary.\n\nBy addressing these tasks, create a robust debugging framework that enhances the development and troubleshooting of hyfiles. 
Start by establishing logging mechanisms and integrating interactive shells, then progressively implement the plugin and flow interaction features.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/86", @@ -3682,6 +3811,7 @@ "node_id": "I_kwDOGDmZaM5Tj9tU", "title": "Update user agent version in common.hy", "body": "Currently raider doesn't update its version so the one used in _common.hy will only be used even if newer version is installed", + "summary": "Update the user agent version in `common.hy`. Ensure that the raider updates its version correctly to reflect any newer versions installed. \n\n1. Locate the section in `common.hy` where the user agent version is defined.\n2. Modify the logic to check for the installed version of the raider and update the user agent accordingly.\n3. Implement a version-checking mechanism that compares the current version with the installed version.\n4. Test the changes to verify that the user agent reflects the latest installed version. \n5. Document the changes made for clarity and future reference.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/87", @@ -3699,8 +3829,8 @@ 59 ], "labels": [ - 46, - 48 + 48, + 46 ] } }, @@ -3713,6 +3843,7 @@ "node_id": "I_kwDOGDmZaM5Tj91t", "title": "Add a new hyfile next to common.hy that isn't changed by raider", "body": "Some raider settings might be desired to be set by the user globally and read upon starting raider. A new file is needed for that, since common.hy is changed by raider automatically.", + "summary": "Create a new configuration file, named `user_settings.hy`, situated alongside `common.hy`. Ensure that this file is not modified by the raider tool during its operations. \n\n1. Define the purpose of `user_settings.hy` to allow users to set global raider settings that persist across sessions.\n2. Implement functionality within the raider startup process to read configurations from `user_settings.hy`.\n3. Ensure that modifications to `common.hy` do not affect or overwrite the settings defined in `user_settings.hy`.\n4. Document the format and options available for user settings in the new file for clarity.\n5. Test the integration by verifying that settings in `user_settings.hy` are correctly loaded at startup and do not conflict with any modifications made to `common.hy`.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/88", @@ -3730,8 +3861,8 @@ 59 ], "labels": [ - 35, 42, + 35, 44 ] } @@ -3745,6 +3876,7 @@ "node_id": "I_kwDOGDmZaM5Tj-Dd", "title": "Build a mechanism to print the diff between responses", "body": "Often during pentesting one changes the request input then compares the 2 responses to check for differences. Raider needs a mechanism to print the diff of 2 responses when using different values as the input of a plugin.", + "summary": "Implement a diff mechanism for comparing responses in Raider. \n\n1. Create a function that captures responses from plugin input variations.\n2. Utilize a diffing library (e.g., `difflib` in Python) to compare the two response outputs.\n3. Format the diff output for clarity, highlighting additions, deletions, and modifications.\n4. Integrate this function into the existing plugin workflow, ensuring it triggers automatically when multiple responses are generated.\n5. 
Test the implementation with various input scenarios to validate accuracy and performance.\n\nBegin by reviewing the current response handling in Raider, then proceed to set up the diffing functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/89", @@ -3762,9 +3894,9 @@ 59 ], "labels": [ - 35, 40, - 42 + 42, + 35 ] } }, @@ -3777,6 +3909,7 @@ "node_id": "I_kwDOGDmZaM5Tj-TX", "title": "Add an easier way to encode/decode and get the hashes of inputs", "body": "Currently the user needs to import libraries in hyfiles when they want for example to calculate the md5 hash of some string before sending it. Since this is common, it would make sense to import them automatically so the user has to write less code to actually use those.", + "summary": "Implement an automatic import feature for commonly used encoding, decoding, and hashing libraries in hyfiles. This enhancement will simplify the user experience by allowing direct access to functions like MD5 hash calculations without manual imports.\n\n1. Identify the most commonly used libraries for encoding, decoding, and hashing (e.g., `hashlib` for MD5).\n2. Modify the hyfiles initialization process to include these libraries automatically.\n3. Create utility functions for encoding and decoding, as well as for calculating hashes, to streamline user interaction.\n4. Update documentation to reflect the new automatic imports and usage examples.\n5. Consider backward compatibility to ensure existing code remains functional without requiring changes.\n\nBegin by investigating the current import process in hyfiles and design the implementation plan for automatic imports.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/90", @@ -3794,8 +3927,8 @@ 59 ], "labels": [ - 35, 42, + 35, 44 ] } @@ -3809,6 +3942,7 @@ "node_id": "I_kwDOGDmZaM5Tj-jK", "title": "Work with sessions in raider", "body": "Raider would benefit from saving the history of all generated inputs/received outputs in a session file in _project.hy. To do this we need:\r\n\r\n- [ ] Saving all plugin inputs\r\n- [ ] Saving all plugin outputs ", + "summary": "Implement session management in Raider. Modify the project to save the history of all generated inputs and received outputs in a session file named `_project.hy`. \n\n1. **Save Plugin Inputs**: \n - Create a mechanism to log all inputs provided to the plugins during a session.\n - Ensure that this data is saved in a structured format within the session file.\n \n2. **Save Plugin Outputs**: \n - Capture the outputs generated by the plugins after processing the inputs.\n - Store this information alongside the corresponding inputs in the session file for easy tracking.\n\nBegin by defining the data structure for the session file. Then, implement functions to write inputs and outputs to the file during the plugin execution. Test this functionality to ensure that all relevant data is being captured and saved correctly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/91", @@ -3826,10 +3960,10 @@ 59 ], "labels": [ - 35, - 38, + 43, 42, - 43 + 35, + 38 ] } }, @@ -3842,6 +3976,7 @@ "node_id": "I_kwDOGDmZaM5Tj_KO", "title": "Traverse the graphs to find vulnerabilities", "body": "Similar to [Modeling and Discovering Vulnerabilities with Code Property Graphs\r\n](https://www.sec.cs.tu-bs.de/pubs/2014-ieeesp.pdf) it should be possible to traverse graphs to identify vulnerabilities in authentication systems. 
The linked paper is about finding linux kernel vulnerabilities having the source code available. When testing authentication, we usually don't have the source code, so the graph instead is built using raider Flow objects, each with its own inputs, outputs, and a way to conditionally decide what the next stage is. At the moment of writing this, the graph architecture still isn't fully implemented so it's not yet possible to start experimenting with this. I wrote this ticket to keep track of the research done towards this goal, and to have a place to discuss the progress towards it.", + "summary": "Implement graph traversal techniques to identify vulnerabilities in authentication systems. Reference the methodology from the paper \"Modeling and Discovering Vulnerabilities with Code Property Graphs.\" Since direct source code access is typically unavailable, utilize raider Flow objects to construct the graph, which includes inputs, outputs, and conditional pathways for traversal.\n\nTo begin tackling this problem:\n\n1. Complete the implementation of the graph architecture to facilitate experimentation.\n2. Develop algorithms to traverse the constructed graphs systematically.\n3. Identify key patterns and vulnerabilities specific to authentication systems that can be mapped onto the graph structure.\n4. Document findings and discuss progress within this GitHub issue for collaborative research and development.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/92", @@ -3874,6 +4009,7 @@ "node_id": "I_kwDOGDmZaM5Tj_YZ", "title": "Integrate raider with other tools", "body": "Some tool would benefit from having an integration allowing to easily authenticate anywhere. I created this issue to keep an eye on what integrations would be worth considering:\r\n\r\n- [ ] ZAProxy\r\n- [ ] BurpSuite\r\n- [ ] mitmproxy\r\n- [ ] browser extensions (to generate hyfiles from HTTP requests/responses)\r\n- [ ] emacs (hey, it's LISP!)", + "summary": "Integrate Raider with essential tools for improved authentication. Focus on the following integrations:\n\n- Implement support for ZAProxy.\n- Develop a connection with BurpSuite.\n- Enable compatibility with mitmproxy.\n- Create browser extensions to generate hyfiles from HTTP requests/responses.\n- Explore integration with Emacs.\n\nStart by researching the APIs and authentication methods used by each tool. Assess the feasibility of each integration based on existing Raider architecture. Prioritize integrations based on user demand and technical complexity. Create a roadmap for implementation and assign tasks for each integration.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/93", @@ -3891,8 +4027,8 @@ 59 ], "labels": [ - 35, - 42 + 42, + 35 ] } }, @@ -3905,6 +4041,7 @@ "node_id": "I_kwDOGDmZaM5Tj_2X", "title": "Fix github actions", "body": "Currently some github actions don't work as desired:\r\n\r\n- [ ] mypy action doesn't really work\r\n- [ ] what needs to run on dev and what on main\r\n - [ ] decide what events need to trigger actions\r\n - [ ] choose which branch to monitor for what action\r\n- [ ] create an action to publish raider to PyPi", + "summary": "Fix GitHub Actions by addressing the following tasks:\n\n1. 
**Resolve Mypy Action Issues**:\n - Investigate the current configuration of the mypy action in the workflow YAML file.\n - Ensure that the mypy configuration file (`mypy.ini` or `setup.cfg`) is correctly specified and located.\n - Check for any dependencies or installation issues that might prevent mypy from running.\n\n2. **Determine Action Triggers**:\n - Analyze the project's workflow to decide which actions should run on the `dev` branch versus the `main` branch.\n - List the events that should trigger specific actions (e.g., `push`, `pull_request`, `release`).\n - Document the rationale for choosing certain events for triggering actions to maintain clarity.\n\n3. **Select Branch Monitoring**:\n - Define which branches should be monitored for each action.\n - Update the workflow file to reflect these branch settings, using the `on` directive in the YAML configuration.\n\n4. **Create PyPi Publishing Action**:\n - Develop a new workflow that automates the process of publishing the package to PyPi.\n - Utilize the `twine` tool for uploading distributions, ensuring that credentials are securely managed through GitHub Secrets.\n - Define the events that should trigger this publishing action, typically on releases or merges to `main`.\n\n**First Steps**:\n- Start with reviewing the existing actions in the `.github/workflows` directory.\n- Test the mypy action locally to identify specific error messages.\n- Draft a new YAML configuration for the publishing action, focusing on the necessary steps to build and upload the package.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/94", @@ -3935,6 +4072,7 @@ "node_id": "I_kwDOGDmZaM5TkAqC", "title": "Create an easy way to generate hyfiles from within python", "body": "Since more people are comfortable with Python than Lisp, it would be great to have an easy way to generate those configuration files by working with Python objects directly. Since hyfiles just import those python objects to be used, it shouldn't be too hard, and if achieved, more people will feel comfortable using raider.", + "summary": "Implement a Python-based solution for generating hyfiles. Focus on creating a straightforward interface that allows users to work with Python objects directly, facilitating the conversion to hyfiles. \n\n1. Analyze the current hyfile structure and identify how Python objects can be represented.\n2. Develop a Python module that allows users to define configurations using Python syntax.\n3. Implement a function to serialize these Python objects into the required hyfile format.\n4. Ensure that the generated hyfiles correctly import the Python objects as intended.\n5. Test the implementation with various use cases to guarantee compatibility and reliability.\n\nBy simplifying the process of hyfile generation, increase accessibility for users unfamiliar with Lisp, ultimately enhancing the usability of the raider tool.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/96", @@ -3952,8 +4090,8 @@ 59 ], "labels": [ - 35, - 42 + 42, + 35 ] } }, @@ -3966,6 +4104,7 @@ "node_id": "I_kwDOGDmZaM5TkA5L", "title": "Split operations into groups", "body": "Like Plugins have been split into related groups, it would make sense to do the same to Operations, to make it easier to manage them. ", + "summary": "Split operations into related groups for improved management. Analyze existing operations and categorize them based on functionality or purpose, similar to how plugins are organized. 
\n\nFirst steps:\n1. Review the current list of operations to identify common themes or functionalities.\n2. Create a proposed structure for grouping operations, ensuring it aligns with user needs and enhances usability.\n3. Implement the grouping in the codebase, ensuring backward compatibility with existing functionality.\n4. Update documentation to reflect the new organization of operations. \n5. Test the changes thoroughly to verify that operations function correctly within their new groups.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/97", @@ -3983,9 +4122,9 @@ 59 ], "labels": [ + 42, 35, - 38, - 42 + 38 ] } }, @@ -3998,6 +4137,7 @@ "node_id": "I_kwDOGDmZaM5TrAG2", "title": "Create a custom dark mode theme for the docs", "body": "I got some feedback from someone who looked at the docs that it would be nice to have a dark mode option. I suggest we quickly create a simple color scheme to differentiate the Raider docs from default Sphinx settings and provide a dark mode option.", + "summary": "Implement a custom dark mode theme for the documentation. \n\n1. Analyze the current Sphinx default theme and identify the color palette used.\n2. Design a new color scheme that enhances readability in low-light environments, focusing on contrasting text and background colors.\n3. Create a new CSS file for the dark mode styles, modifying background colors, text colors, and link colors as necessary.\n4. Update the Sphinx configuration file (`conf.py`) to include the new dark mode CSS file.\n5. Develop a toggle mechanism (e.g., a button or switch) to allow users to switch between light and dark themes.\n6. Test the new theme across different devices and browsers to ensure compatibility and usability.\n7. Gather user feedback to refine the dark mode experience. \n\nBegin with step 1 by reviewing the existing theme and experimenting with potential color combinations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/99", @@ -4028,6 +4168,7 @@ "node_id": "I_kwDOGDmZaM5XKtQ9", "title": "Store Next links as igraph vertices", "body": "For now igraph is barely being used, only the Flows are stored as nodes. To make use of igraph, the links between nodes (Next) need to be somehow stored as vertices.", + "summary": "Implement storage of Next links as igraph vertices. Currently, only Flows are represented as nodes in the igraph structure. Extend the igraph model to include Next links, treating them as vertices to enhance the graph's utility.\n\n1. Review the existing implementation of Flows in igraph to understand how nodes are currently structured.\n2. Define the attributes and properties of the Next links that will be stored as vertices.\n3. Modify the data model to accommodate the new vertex type for Next links.\n4. Update the igraph initialization and graph construction logic to include Next links as vertices.\n5. Test the updated implementation to ensure that Next links are correctly integrated and function as expected within the graph structure.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/105", @@ -4045,8 +4186,8 @@ 59 ], "labels": [ - 35, - 42 + 42, + 35 ] } }, @@ -4059,6 +4200,7 @@ "node_id": "I_kwDOGDmZaM5XvTZR", "title": "Create XPath Plugin", "body": "Add a plugin to extract data using XPath", + "summary": "Create an XPath Plugin to enable data extraction via XPath queries. \n\n1. Define the plugin architecture to support XPath functionality.\n2. 
Implement core functionality to parse XML documents and evaluate XPath expressions.\n3. Utilize libraries like `lxml` or `xml.etree.ElementTree` for XML parsing and XPath support.\n4. Develop a user interface component for users to input XPath expressions.\n5. Add error handling to manage invalid XPath queries and provide user feedback.\n6. Write unit tests to ensure the plugin correctly extracts data based on various XPath scenarios.\n7. Document the plugin usage and provide examples for common XPath queries.\n\nBegin by setting up a basic plugin structure and integrating an XML parsing library. Then, implement a simple XPath evaluation function to validate the approach.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/109", @@ -4089,6 +4231,7 @@ "node_id": "I_kwDOGDmZaM5X319D", "title": "Create a Wait Operation", "body": "Build an Operation that will wait either for a specified amount of time (sleep) or until user manually continues the process", + "summary": "Create a new Wait Operation that allows for two modes of operation: a timed wait and a manual wait. Implement a function that takes an input parameter for the duration of the sleep in seconds. Use built-in threading or asynchronous features to pause execution for the specified time.\n\nFor the manual continuation, design an interface or command prompt that allows the user to resume the process when ready. Utilize event listeners or signaling mechanisms to capture the user's input effectively.\n\nStart by:\n1. Defining the operation interface with parameters for time duration and mode (sleep/manual).\n2. Implementing the timed wait using `time.sleep()` or `asyncio.sleep()` for asynchronous operations.\n3. Setting up a loop or listener for manual continuation, using input() or GUI elements depending on the application context.\n4. Testing the operation in both modes to ensure correct functionality and user experience.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/111", @@ -4119,6 +4262,7 @@ "node_id": "I_kwDOGDmZaM5X6y-4", "title": "Fix broken PlantUML diagrams", "body": "After upgrading documentation dependencies, the diagrams broke and aren't displayed.", + "summary": "Fix broken PlantUML diagrams. \n\nFirst, identify the specific dependencies that were upgraded and check their release notes for any breaking changes related to PlantUML. \n\nNext, verify the configuration settings in the documentation framework to ensure they align with the latest versions of the dependencies. \n\nInvestigate any changes in syntax or required settings for PlantUML that might have occurred due to the upgrade. \n\nRun tests to reproduce the issue and confirm that diagrams are not displaying. \n\nConsider rolling back to the previous versions of the dependencies temporarily to assess if this resolves the issue. 
\n\nFinally, update the PlantUML rendering process if necessary, and ensure that all diagram files are correctly formatted and accessible within the project structure.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/113", @@ -4136,8 +4280,8 @@ 59 ], "labels": [ - 38, - 48 + 48, + 38 ] } }, @@ -4150,6 +4294,7 @@ "node_id": "I_kwDOGDmZaM5Yny49", "title": "Remove the need to use :name parameter in Plugins", "body": "Plugins need to have an unique name, and yet it shouldn't be the user's responsibility to give it that name, it could be the variable's name used when defining Plugins, or some automatically generated unique id if it's not set to a variable.", + "summary": "Remove the requirement for the `:name` parameter in Plugins. Ensure that each Plugin has a unique identifier automatically generated from the variable name used during Plugin definition, or create a fallback mechanism that generates a unique ID if no variable name is provided.\n\n1. Investigate the current implementation of Plugin registration and how the `:name` parameter is utilized.\n2. Refactor the code to extract the variable name from the context where the Plugin is defined.\n3. Implement logic to generate a unique ID for Plugins when the variable name is not available.\n4. Update the documentation to reflect these changes and guide users on the new Plugin definition process.\n5. Write unit tests to verify that unique identifiers are correctly assigned to Plugins without the `:name` parameter.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/117", @@ -4167,8 +4312,8 @@ 59 ], "labels": [ - 37, - 42 + 42, + 37 ] } }, @@ -4181,6 +4326,7 @@ "node_id": "I_kwDOGDmZaM5Yn1HA", "title": "Fix multiple users setup", "body": "At the moment after the latest architecture changes, it's not possible anymore to work with multiple users. New CLI option to chose users needs to be implemented too.", + "summary": "Implement support for multiple user setups following recent architecture changes. Address the inability to work with multiple users in the current system. Develop a new CLI option that allows users to select their profiles. \n\nFirst steps:\n1. Review the recent architecture changes to identify breaking points affecting multi-user functionality.\n2. Define the requirements for the new CLI option, including how users will be listed and selected.\n3. Create a prototype for the CLI option, ensuring it integrates well with the existing system.\n4. Test the multi-user functionality to verify that users can switch profiles without issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/118", @@ -4198,8 +4344,8 @@ 59 ], "labels": [ - 42, - 48 + 48, + 42 ] } }, @@ -4212,6 +4358,7 @@ "node_id": "I_kwDOGDmZaM5Yn3tI", "title": "Fix logging for Plugins", "body": "Plugins aren't logging information correctly at the moment, it doesn't use the same logger rest of raider does.", + "summary": "Fix the logging mechanism for plugins in the Raider application. Ensure that plugins utilize the same logging framework as the rest of the Raider codebase. \n\n1. Identify the current logging implementation used in plugins and compare it with the standard logging approach used elsewhere in Raider.\n2. Refactor the plugin logging code to adopt the common logging framework, ensuring consistency across the application.\n3. Replace any custom logging functions in plugins with the standardized logger calls.\n4. 
Test the logging functionality of plugins to verify that logs are recorded correctly and consistently with the rest of the application.\n5. Update documentation to reflect the changes made to the logging system for plugins. \n\nInitiate this process by reviewing the existing logging code in at least one plugin and familiarize yourself with the main Raider logging setup.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/119", @@ -4229,8 +4376,8 @@ 59 ], "labels": [ - 42, - 48 + 48, + 42 ] } }, @@ -4243,6 +4390,7 @@ "node_id": "I_kwDOGDmZaM5Yn5yw", "title": "Store multiple proxies in the common.hy", "body": "Having multiple proxies stored would be useful, and accessing them by their aliases, for example 'burpsuite': 'localhost:8080' and 'zaproxy':'localhost:8081' and switching between them by the alias. http:// prefix shouldn't be a requirement, HTTP proxy is only supported type now.", + "summary": "Implement storage for multiple proxies in `common.hy`. Define a structure to hold proxies with aliases, such as:\n\n```python\nproxies = {\n 'burpsuite': 'localhost:8080',\n 'zaproxy': 'localhost:8081'\n}\n```\n\nEnsure that accessing proxies by their aliases is straightforward, allowing users to switch between them easily without needing an `http://` prefix. \n\nBegin by:\n\n1. Modifying the `common.hy` file to include a dictionary for proxy storage.\n2. Creating functions to add, remove, and retrieve proxies by alias.\n3. Updating existing code to utilize the new proxy structure for connections. \n4. Writing unit tests to verify that adding, retrieving, and switching proxies works correctly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/120", @@ -4260,8 +4408,8 @@ 59 ], "labels": [ - 35, - 42 + 42, + 35 ] } }, @@ -4274,6 +4422,7 @@ "node_id": "I_kwDOGDmZaM5Yn6fi", "title": "Add macros and document creating own macros", "body": "Add more macros to make configuration easier and document them.", + "summary": "Implement additional macros to simplify configuration processes. Ensure thorough documentation for creating custom macros is provided.\n\n**First Steps:**\n1. Identify key areas within the codebase where macros can enhance configuration.\n2. Develop new macros that address these areas, focusing on improving usability and efficiency.\n3. Create a comprehensive guide that details the steps for users to create their own macros, including examples and best practices.\n4. Review existing macros to ensure consistency in naming conventions and functionality.\n5. Test the new macros and documentation for clarity and effectiveness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/121", @@ -4305,6 +4454,7 @@ "node_id": "I_kwDOGDmZaM5YoLYQ", "title": "Use consistent CamelCase across the codebase", "body": "Some identifiers don't use CamelCase so it's confusing when having to guess which case it's in.", + "summary": "Identify inconsistencies in identifier naming conventions throughout the codebase. Ensure all identifiers adhere to CamelCase formatting. \n\n1. Scan the codebase for identifiers that do not follow CamelCase, such as variable names, function names, and class names.\n2. Create a list of all affected identifiers for easy reference.\n3. Implement a script or use automated tools to detect and highlight non-CamelCase identifiers.\n4. Refactor the identified identifiers to conform to CamelCase standards, ensuring to update any references throughout the code.\n5. 
Run tests to confirm no functionality is broken due to the renaming.\n6. Document the new naming convention in the project's contribution guidelines to promote consistency among future contributions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/123", @@ -4335,6 +4485,7 @@ "node_id": "I_kwDOGDmZaM5YuD93", "title": "Fix issues reported by pylint", "body": "After latest architecture changes the new code hasn't been tested with pylint, and it returns many issues for now that is worth looking at.", + "summary": "Fix pylint issues resulting from recent architecture changes. Run pylint on the new codebase to identify all reported issues. Review the output and categorize problems based on severity and type (e.g., syntax errors, style violations, potential bugs). \n\nStart with the following steps:\n1. Install pylint if not already done using `pip install pylint`.\n2. Execute pylint on the project directory: `pylint `.\n3. Document the issues reported, prioritizing them for resolution.\n4. Begin resolving the most critical issues first, such as syntax errors and major style violations.\n5. Commit changes and re-run pylint to ensure issues are addressed and no new problems are introduced. \n6. Continue this process iteratively until all issues are resolved.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/126", @@ -4365,6 +4516,7 @@ "node_id": "I_kwDOGDmZaM5Y7thA", "title": "Write hylang/LISP case study", "body": "Explain why LISP was the best choice and why macros are useful:\r\n- [ ] Why LISP\r\n- [ ] Why hylang\r\n- [ ] What are Macros\r\n- [ ] How to use Macros", + "summary": "Create a case study focusing on Hylang and LISP. Address the following points:\n\n1. **Justify the Choice of LISP**: Explain its historical significance, expressiveness, and suitability for symbolic computation and rapid prototyping.\n\n2. **Highlight the Advantages of Hylang**: Discuss its integration with Python, ease of use for Python developers, and its Lisp-like syntax that enhances productivity.\n\n3. **Define Macros**: Clarify what macros are in LISP, emphasizing their role in code transformation and metaprogramming.\n\n4. **Demonstrate Macro Usage**: Provide examples of how to define and use macros in Hylang, showcasing their ability to extend the language and create domain-specific languages.\n\n**First Steps to Tackle the Problem**:\n- Research the historical context and unique features of LISP.\n- Compare Hylang with traditional LISP dialects and other programming languages.\n- Explore the macro system in LISP and Hylang, including syntax and common patterns.\n- Compile illustrative examples to demonstrate macro capabilities in practice.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/128", @@ -4395,6 +4547,7 @@ "node_id": "I_kwDOGDmZaM5Y7t9H", "title": "Write a HOWTO on writing macros", "body": "Write a short introduction about how to use macros with raider.", + "summary": "Create a HOWTO document on writing macros for Raider. Start with a brief introduction explaining the purpose of macros in Raider, emphasizing their utility in automating repetitive tasks and enhancing workflow efficiency.\n\nOutline the following steps for writing macros:\n\n1. **Set Up Raider Environment**: Ensure Raider is properly installed and configured on your system.\n\n2. **Access Macro Editor**: Open Raider and navigate to the macro editor interface. Familiarize yourself with the layout and available options.\n\n3. 
**Define Macro Functions**: Use the provided syntax to define macro functions. Include examples of common functions, such as triggering events or executing commands.\n\n4. **Record a Macro**: Demonstrate how to record a macro by performing actions within Raider. Explain how to start, pause, and stop the recording process.\n\n5. **Edit a Macro**: Show how to edit the recorded macro for optimization. Discuss the importance of commenting within the code for clarity.\n\n6. **Test the Macro**: Guide users on how to test the macro in a controlled environment to ensure it works as intended, and how to troubleshoot common issues.\n\n7. **Save and Share Macros**: Explain how to save macros for future use and share them with other users, if applicable.\n\n8. **Best Practices**: Conclude with best practices for writing efficient and maintainable macros, such as keeping them modular and avoiding hard-coded values.\n\nEncourage readers to explore further customization options and contribute to the Raider community by sharing their macro creations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/129", @@ -4425,6 +4578,7 @@ "node_id": "I_kwDOGDmZaM5Y7uYz", "title": "Document existing raider macros", "body": "Add documentation explaining how existing raider macros work", + "summary": "Document existing raider macros. Include detailed explanations of the functionality and usage of each macro. Break down the components of the macros, including any parameters, return values, and examples of how they can be utilized in different scenarios. \n\nStart by identifying all existing raider macros in the codebase. Create a structured format for the documentation, such as a markdown file or wiki page, to ensure consistency. For each macro, provide a high-level overview, followed by a technical breakdown.\n\nConsider implementing the following steps:\n1. Review the current codebase to locate all macros related to raider functionality.\n2. Analyze the code to understand how each macro operates and its purpose.\n3. Write clear and concise documentation for each macro, including code snippets.\n4. Create a README or index page that links to the documentation for easy navigation.\n5. Review the documentation for clarity and accuracy before finalizing.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/130", @@ -4455,6 +4609,7 @@ "node_id": "I_kwDOGDmZaM5Y7uzJ", "title": "Add docstrings to all new code", "body": "The new code has no docstrings yet, to build a complete documentation those needs to be set up.", + "summary": "Add docstrings to all new code to ensure comprehensive documentation. Review each function, class, and module to provide clear descriptions of their purpose, parameters, return values, and any exceptions raised. \n\nFirst, identify all new code files that lack docstrings. Next, establish a consistent format for docstrings, following the conventions of the project (e.g., Google style or NumPy style). Then, incrementally add docstrings to each identified code element, ensuring clarity and completeness. 
Finally, run documentation generation tools (like Sphinx) to verify that the new docstrings are incorporated correctly into the overall project documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/131", @@ -4485,6 +4640,7 @@ "node_id": "I_kwDOGDmZaM5ZPBx9", "title": "Fix logging for utils", "body": "Like Plugins, utils isn't using the logger properly so no log messages are being shown for example when reading hyfiles", + "summary": "Fix the logging implementation in the utils module. Ensure that the logging framework is correctly integrated to display log messages, particularly during the reading of hyfiles. \n\n1. Review the current logging setup in the utils module.\n2. Identify areas where logging calls are missing or incorrectly implemented.\n3. Implement appropriate logging calls at key points in the code, such as before and after reading hyfiles.\n4. Test the logging functionality to confirm that messages are displayed as expected.\n5. Update documentation to reflect any changes made in the logging process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/136", @@ -4502,8 +4658,8 @@ 59 ], "labels": [ - 44, - 48 + 48, + 44 ] } }, @@ -4516,6 +4672,7 @@ "node_id": "I_kwDOGDmZaM5Zm8cj", "title": "Create a way to use GraphQL within hylang files", "body": "Either some macros or plugins will be needed to improve the hyfiles when dealing with graphql", + "summary": "Implement GraphQL support in hylang files by developing macros or plugins. This will enhance the handling of GraphQL queries and mutations within Hy syntax.\n\n1. Analyze the current Hy syntax and identify key areas where GraphQL integration is necessary.\n2. Design macros that can simplify the construction of GraphQL queries, ensuring they translate properly into the underlying Python code.\n3. Consider creating a plugin that can manage GraphQL schemas, types, and resolvers, making it easier for developers to work with GraphQL in Hy.\n4. Develop unit tests to ensure the reliability of the new macros or plugins, particularly focusing on edge cases in GraphQL operations.\n5. Document the usage of the new features clearly for users of hylang, providing examples of common GraphQL operations in Hy syntax.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/141", @@ -4533,8 +4690,8 @@ 59 ], "labels": [ - 37, - 44 + 44, + 37 ] } }, @@ -4547,6 +4704,7 @@ "node_id": "I_kwDOGDmZaM5ZsajZ", "title": "Create a raider REPL", "body": "To preserve a session across runs, raider would need to have a REPL, and the user should have the ability to inspect and manipulate all individual elements.", + "summary": "Implement a Raider REPL to enable session preservation across runs. Ensure the REPL allows users to inspect and modify individual elements within the session.\n\n1. Define the REPL interface and establish how users will interact with it.\n2. Integrate session management functionality to save and load session states.\n3. Implement inspection capabilities, allowing users to view current elements and their states.\n4. Add manipulation features to enable users to modify elements dynamically.\n5. 
Test the REPL for user experience and ensure all functionalities work as intended.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/raider/issues/142", @@ -4564,8 +4722,8 @@ 59 ], "labels": [ - 35, - 42 + 42, + 35 ] } }, @@ -4578,6 +4736,7 @@ "node_id": "I_kwDOGDKlFs5mtsUw", "title": "migrate content to www-project repo", "body": "There is a trend in OWASP have the documentation projects accessible via the project web pages, and it may be that this repo could be combined into the [www-project-threat-modeling-playbook](https://github.com/OWASP/www-project-threat-modeling-playbook) repo\r\n\r\nwe could then browse the pages in a similar manner to the [Secure Coding Practices](https://owasp.org/www-project-secure-coding-practices-quick-reference-guide/stable-en/) document", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-modeling-playbook/issues/1", @@ -4600,10 +4759,11 @@ "pk": 160, "fields": { "nest_created_at": "2024-09-11T19:29:59.579Z", - "nest_updated_at": "2024-09-11T19:29:59.579Z", + "nest_updated_at": "2024-09-13T15:08:50.458Z", "node_id": "I_kwDOGDKlFs5mts0i", "title": "provide pdf and epub", "body": "We provide pdf and epub versions of the document in the [OWASP Developer Guide](https://owasp.org/www-project-developer-guide/) as part of the commit workflow\r\n\r\nThis can be done for the OTMP as well if this is useful", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-modeling-playbook/issues/2", @@ -4630,6 +4790,7 @@ "node_id": "MDU6SXNzdWU5ODI3MjEwNDk=", "title": "make frontend show Graph", "body": "there's an experimental \"show CREs in a graph\" frontend page, hook it up to the relevant rest calls", + "summary": "The issue discusses the need to connect an experimental frontend page that displays CREs in a graph format with the appropriate REST API calls. To address this, the relevant integration between the frontend and the backend needs to be established.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/7", @@ -4658,6 +4819,7 @@ "node_id": "MDU6SXNzdWU5ODI3NDE1NjE=", "title": "Add pre-commit hooks", "body": "add pre commit hooks for\r\n` black . `\r\n`mypy --strict --exclude venv/ . `\r\n`FLASK_APP=cre.py FLASK_CONFIG=test flask test | grep -i FAIL ` # there has to be a better way to ensure tests pass\r\n` if [[ $(FLASK_APP=cre.py FLASK_CONFIG=test flask test --coverage | grep TOTAL | awk '{print $6}' | tr -d \"%\" ) -lt 70 ]] then \r\n exit 1 \r\nfi` # there has to be a better way to ensure coverage \r\n\r\n", + "summary": "The issue discusses the need to implement pre-commit hooks for several tasks, including formatting code with `black`, type checking with `mypy`, running Flask tests, and checking test coverage. The author expresses a desire for more efficient methods to ensure tests pass and to verify code coverage meets a minimum threshold of 70%. To address this issue, the relevant pre-commit hooks should be created and optimized for the mentioned tasks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/13", @@ -4689,6 +4851,7 @@ "node_id": "MDU6SXNzdWU5ODI3Nzk4ODQ=", "title": "Create production docker container", "body": "start with distroless\r\nhttps://medium.com/@luke_perry_dev/dockerizing-with-distroless-f3b84ae10f3a", + "summary": "The issue requests the creation of a production Docker container using a distroless image. 
A reference article is provided for guidance on the process. To address this, one needs to follow the outlined steps in the article to properly configure and build the Docker container.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/15", @@ -4715,6 +4878,7 @@ "node_id": "MDU6SXNzdWU5ODgzNDA2MzQ=", "title": "Get rid of default LinkType:Same", "body": "with then new linktypes introduced in issue: 29 there is no need for Same to be the default anymore, remove it", + "summary": "The issue suggests removing the default LinkType \"Same\" since new link types have been introduced that make it unnecessary. It is implied that adjustments to the code or configuration are needed to eliminate \"Same\" as the default option.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/34", @@ -4741,6 +4905,7 @@ "node_id": "MDU6SXNzdWU5OTIwODMzNzE=", "title": "Setup email forwarding in the domain to the whole team and change contact info on the landing page to info@opencre.org", "body": "", + "summary": "The issue requests the setup of email forwarding for the entire team using the domain and updating the contact information on the landing page to reflect the new email address info@opencre.org. To address this, the necessary configurations for email forwarding should be implemented, and the landing page content should be updated accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/46", @@ -4771,6 +4936,7 @@ "node_id": "MDU6SXNzdWU5OTIxNjEzMjI=", "title": "Data: complete standard hyperlinks and names", "body": "After having decided between v2 and v3:\r\nadd urls for CWE, OPC, NIST(Torsten) and WSTG, and nice names for the cheat sheets\r\n", + "summary": "The issue discusses the need to finalize the choice between v2 and v3 and subsequently add complete standard hyperlinks and appropriate names for various resources, including CWE, OPC, NIST, and WSTG cheat sheets. The next steps involve selecting the version and then updating the documentation with the required URLs and names.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/49", @@ -4801,6 +4967,7 @@ "node_id": "MDU6SXNzdWU5OTM5MzQwMjQ=", "title": "Docker Improvements", "body": "build frontend\r\ninstall deps for python without building them from scratch\r\n\r\noffer production option where application runs in a distroless container\r\nimprove docker entrypoint to optionally run migrations on launch\r\ndocument required env vars to point to database outside container\r\n", + "summary": "The issue discusses several proposed improvements for the Docker setup of a project. Key points include building the frontend more efficiently, installing Python dependencies without building them from scratch, and offering a production option that utilizes a distroless container. Additionally, it suggests enhancing the Docker entrypoint to support optional migration execution on launch and documenting the necessary environment variables for connecting to an external database. Addressing these points will likely improve the overall efficiency and usability of the Docker implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/65", @@ -4831,6 +4998,7 @@ "node_id": "I_kwDOF9wO7c474T0Z", "title": "Switch to new css framework", "body": "## Issue\r\n\r\nCurrently, [Semantic-ui](https://semantic-ui.com/) is used as the main CSS framework. 
The library hasn't seen a new release/update since 2018. Not anything with priority but would be better to use a styling framework that is still supported and known, so a wider community has the ability to give the CRE project a boost :) .", + "summary": "The issue discusses the need to transition from the outdated Semantic-ui CSS framework, which has not been updated since 2018, to a more current and supported styling framework. This change would enhance community engagement and support for the project. It is suggested that switching to a well-known framework could benefit the project's development. Consider researching and evaluating alternative CSS frameworks for this transition.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/105", @@ -4857,6 +5025,7 @@ "node_id": "I_kwDOF9wO7c480wFK", "title": "Provide ability to link directly to a standard item through the url", "body": "Provide ability to link directly to a standard item through the url, in two ways:\r\n1. eg opencre.org/123-456&deeplink=ASVS\r\nThis forwards the user from opencre, without rendering a CRE page, to the hyperlink that belongs to that standard\r\n2. eg opencre.org/bypasslink/asvs/v6.1.1/encrypt%20personal%data%at%rest\r\nThis also forwards to that asvs page based on our database AND it asks for a description of the topic. We collect those, so we can see what standard entries we apparently have no topic for (which would be the only reason to use method 2 instead of method 1. Stil that last argument is optional. ", + "summary": "The issue requests the implementation of a feature that allows users to link directly to a standard item via a URL in two distinct ways. The first method involves a simple forwarding to the standard's hyperlink without rendering a page. The second method not only forwards to a specific page but also prompts the user for a description of the topic, which can help identify gaps in the existing topic entries. To address this issue, the development team should consider implementing both linking methods as described.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/116", @@ -4885,6 +5054,7 @@ "node_id": "I_kwDOF9wO7c482_D1", "title": "Node.js API wrapper", "body": "## User Story\r\n\r\n**As the** lead developer of OWASP Juice Shop\r\n**I want** to include a Node.js module that wraps the CRE API into a programmatically usable library\r\n**so that** I do not have to worry about putting together API calls manually\r\n**and** do not need to manually crawl JSON responses for the links I am interested in.\r\n\r\n### Expected Behaviour\r\n\r\nModule will be kept up-to-date with API changes.", + "summary": "The issue outlines a request to create a Node.js module that serves as a wrapper for the CRE API, enabling easier and more efficient interaction with the API by eliminating the need for manual API calls and JSON response parsing. The module should also be maintained to ensure it remains current with any changes to the API. 
To address this, a development plan for building and maintaining the module should be formulated.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/117", @@ -4911,6 +5081,7 @@ "node_id": "I_kwDOF9wO7c4_DNE2", "title": "Ingest owasp metadata for projects and make CRE mappings", "body": "## Issue\r\nCRE data is nice for finding out what is the problem and what regulations it solves\r\n but it doesn't show how to test for it or actually solve it.\r\nMeanwhile owasp has exported data that tells you which projects exist and mostly, what they do.\r\nlet's combine the two\r\n\r\n### Expected Behaviour\r\nmake document type project\r\nmake frontend page project\r\ningest data from here https://raw.githubusercontent.com/OWASP/owasp.github.io/main/_data/projects.json\r\nassign to CREs\r\n????\r\nproffit\r\n\r\n### Actual Behaviour\r\nthe aboe doesn't exist\r\n", + "summary": "The issue discusses the need to integrate OWASP metadata for various projects with existing CRE (Common Vulnerabilities and Exposures) mappings. While CRE data helps identify problems and relevant regulations, it lacks guidance on testing and resolution. The proposal is to create a document type for projects, develop a frontend page, and ingest data from the provided OWASP projects JSON file to assign it to CREs. Currently, this functionality does not exist. \n\nTo move forward, the following tasks should be addressed: creating the project document type, developing the frontend page, and implementing the data ingestion and assignment process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/126", @@ -4942,6 +5113,7 @@ "node_id": "I_kwDOF9wO7c5CL1yw", "title": "add SKF data", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nSKF has a knowledge base and code examples\r\nwe could add the relevant SKF knowledge base items (MASVS, ASVS and custom descriptions) to CRE.\r\nLet's do this ", + "summary": "The issue proposes adding relevant SKF knowledge base items, including MASVS, ASVS, and custom descriptions, to the CRE platform. To address this, the necessary SKF content should be identified and integrated into the system.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/145", @@ -4970,6 +5142,7 @@ "node_id": "I_kwDOF9wO7c5C7XxK", "title": "Auto data health checks", "body": "once the auto-backups are in place\r\ncreate an action that downloads the latest database from heroku and compares it to the latest known good state.\r\nIf the known good state data isn't equivalent to the heroku ones, fail.\r\n\r\nThis might be possible by just dumping dbs, filtering out heroku custom things and checking for string equivalence", + "summary": "The issue discusses implementing automated data health checks following the establishment of auto-backups. The proposed solution involves creating an action that downloads the latest database from Heroku and compares it to the latest known good state. If discrepancies are found between the known good state and the Heroku data, the process should fail. It suggests that this could potentially be achieved by dumping the databases, filtering out Heroku-specific elements, and checking for string equivalence. 
\n\nTo move forward, the task would involve developing the action to facilitate this comparison and failure mechanism.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/149", @@ -5000,6 +5173,7 @@ "node_id": "I_kwDOF9wO7c5D-qSb", "title": "Browse topics feature is too hidden", "body": "### **What is the issue?**\r\n\r\nThe show high level topics link is hidden in the text of the front page, whereas it should be a prominent feature - perhaps left of the search text with a button that says Browse topics, or perhaps just 'Topics'\r\nhttps://www.opencre.org/search/%3E%3E\r\n\r\nPerhaps we want to make it faster first (#154 )\r\n\r\n", + "summary": "The \"Browse topics\" feature is currently not prominently displayed on the front page, making it difficult for users to find. It is suggested that this feature be made more visible, possibly by placing it to the left of the search text with a clear button labeled \"Browse topics\" or simply \"Topics.\" Additionally, there is a mention of the need to improve speed, which may be addressed in a related issue. \n\nTo enhance user experience, consider redesigning the placement of the \"Browse topics\" link and ensuring it stands out on the page.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/155", @@ -5026,6 +5200,7 @@ "node_id": "I_kwDOF9wO7c5EjnQx", "title": "Add export functionality", "body": "Based on OWASP/www-project-integration-standards#25:\r\n\r\n> Given a REST call, add the ability to export the results into a google sheets spreadsheet which allows humans to get the mappings into a CSV format. This is already partly implemented (`application/utils/spreadsheet.py`) but we haven't implemented any API plumbing for it.\r\n\r\nIn the FE, that could be implemented as a Button (see below).\r\nIt should handle CRE pages, standard pages and generic search pages.\r\n\r\n![pic](https://user-images.githubusercontent.com/6052785/145883219-498b82ee-b0fd-412e-a007-0182edfa3104.png)", + "summary": "The issue requests the implementation of an export functionality that allows users to export REST call results into a Google Sheets spreadsheet, facilitating the conversion into CSV format. While there is partial implementation in `application/utils/spreadsheet.py`, additional API integration is needed. The front end should include a button to trigger this functionality, applicable for CRE pages, standard pages, and generic search pages. The next steps involve completing the API plumbing and designing the button for the user interface.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/161", @@ -5052,6 +5227,7 @@ "node_id": "I_kwDOF9wO7c5F-pVX", "title": "Add a browse button", "body": "Add a browse button with the same styling as the search button and with a little bullet list as an icon. Put it right of the \"Open CRE\" tab and let it link to the root CREs - when that one is fixed.", + "summary": "The issue requests the addition of a browse button styled similarly to the search button, featuring a bullet list icon. This button should be positioned to the right of the \"Open CRE\" tab and should link to the root CREs once resolved. 
Implementation of this feature is needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/180", @@ -5080,6 +5256,7 @@ "node_id": "I_kwDOF9wO7c5Gig7y", "title": "Clean up topic page and make more clear", "body": "Based on analysis and interviews, the following low hanging fruit has been identified to make the topics page more clear and lean:\r\n-Remove “tags: “line\r\n-Remove “CRE:xxx-xxx”: at the beginning of every line\r\n-Also remove “CRE:” prefix (instead of “CRE: >>Secure data storage” say “Secure data storage” because we should not call topics “CRE’s”. It’s not forbidden to do so, but it’s jargon.\r\n-Move “bla bla contains” block more to the top, below the “is linked to” block\r\n-For topic pages: change “is linked to” to “refers to”\r\n-For standard pages: change “is linked to” to “is referenced by”\r\n-Remove xxx-xxx Before every hyperlink to a CRE topic: Instead of “163-776 Backups”, write “Backups”\r\n-If easy to do: indent all border blocks so that titles stand out more clear as separators of content: “Bla bla related to”\r\n-By default: collapse topics that are ‘related to’\r\n-Change “is part of” to “is child of”:\r\n-Add a hyperlink icon next to references on a topic page, so you get taken directly to the content", + "summary": "The issue suggests several improvements to enhance clarity and simplicity on the topics page. Key recommendations include removing unnecessary prefixes and jargon, adjusting the placement of certain blocks, and changing specific terminology for consistency. Additionally, it proposes visual adjustments like indenting border blocks and collapsing related topics by default. Finally, adding a hyperlink icon next to references is also suggested for easier navigation. Implementing these changes would lead to a more user-friendly experience.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/186", @@ -5108,6 +5285,7 @@ "node_id": "I_kwDOF9wO7c5Gkk9r", "title": "Add topic descriptions", "body": "Add a description field to topics in the UI and in the data. example: HTTP security headers: \"Configuration of the web server putting special information in the http communication, for example content security policies\"", + "summary": "The issue proposes adding a description field to topics within the user interface and data. This would provide additional context for each topic, like the example given for HTTP security headers, which explains its purpose and configuration. Implementing this feature would enhance user understanding of various topics.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/187", @@ -5136,6 +5314,7 @@ "node_id": "I_kwDOF9wO7c5Gl-qa", "title": "'Wrong secrets' seems empty because doesn't expand", "body": "When arriving at the Wrong secrets project, the title doesn't expand, so the hyperlink is not shown and users may believe they are at a dead end.\r\nhttps://www.opencre.org/node/tool/OWASP%20WrongSecrets/", + "summary": "The issue highlights that the \"Wrong secrets\" project title does not expand, resulting in the hyperlink not being visible. This could lead to users thinking they have reached a dead end. 
To address this, it is suggested to ensure that the project title is properly expandable to display the hyperlink.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/188", @@ -5166,6 +5345,7 @@ "node_id": "I_kwDOF9wO7c5HJZZs", "title": "CRE does not contain the Att&ck framework", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nWe are missing the Att&ck framework, it could be used to add more CREs and easily map more tools, code snippets etc.\r\nShould be relatively easy to import via https://attack.mitre.org/resources/working-with-attack/", + "summary": "The issue highlights the absence of the ATT&CK framework within the current system, which limits the ability to incorporate additional CREs and effectively map tools and code snippets. To address this, it is suggested to import the framework using the resources available on the MITRE ATT&CK website.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/192", @@ -5192,6 +5372,7 @@ "node_id": "I_kwDOF9wO7c5HJZ2a", "title": "CRE does not contain the def3nd framework", "body": "## Issue\r\n\r\n### **What is the issue?**\r\nwhat the title says", + "summary": "The issue states that the CRE does not include the def3nd framework. To address this, the framework needs to be integrated into the CRE.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/193", @@ -5218,6 +5399,7 @@ "node_id": "I_kwDOF9wO7c5Hct5v", "title": "Provide Trusted Contributors with the ability to import data in opencre.org", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nCurrently, new data in opencre.org can be inputted by way of a code-maintainer running an import script.\r\nThis is inefficient as it greatly restricts who can import data and forces a hard coupling between code maintainers and data authors.\r\n\r\n\r\n### Expected Behaviour\r\n\r\nData authors should have a strongly authenticated UI which allows them to easily mass-import data into opencre.org\r\nThey should also be given the freedom to either manipulate or replace individual mappings.\r\nHowever, special care should be taken to preserve CRE immutability.\r\n\r\n### Possible implementations:\r\n#### Admin panel connected to an idp\r\n\r\nThere is an admin page which uses open id connect and an external identity provider (e.g. okta) to authenticate a data author.\r\nData authors could provide an email address which when added to an identity provider solution gives them access to said admin panel, from there they can either provide the url of a spreadsheet that fits the [mapping template](https://docs.google.com/spreadsheets/d/1f47aZydJ47n-iGb0fkmu880wSaFyYDM-zdkgs6oMV7I/edit?usp=sharing) and the backend runs the equivalent of `python cre.py --add --from-spreadsheet `\r\n\r\nPros: industry standards solution, on-prem users can hook their own idp\r\nCons: needs an external idp solution, maintenance burden.\r\n\r\n\r\n", + "summary": "The issue outlines the need to enhance the data import process for opencre.org by allowing Trusted Contributors to import data directly through a user-friendly interface. 
Currently, only code maintainers can run the import script, which limits flexibility and creates unnecessary dependency between maintainers and data authors.\n\nThe expected behavior is to create a secure, authenticated UI for data authors to mass-import data, with options to manipulate or replace individual mappings while ensuring the integrity of CRE immutability.\n\nOne proposed implementation involves developing an admin panel that integrates with an identity provider (IDP) for authentication. Data authors could access this panel and submit a spreadsheet URL that adheres to a specified mapping template, enabling automated data import.\n\nTo move forward, additional implementation alternatives are needed that might address the cons of using an external IDP while still meeting the project's requirements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/194", @@ -5244,6 +5426,7 @@ "node_id": "I_kwDOF9wO7c5Hdy_x", "title": "Create an automated parser for the core rule set", "body": "## Issue\r\nThere is no automatic way to import core rule set rules\r\nCRS project exports their rules in JSON .\r\nWe should be parsing this and auto-exporting them.\r\n", + "summary": "The issue highlights the absence of an automated method to import the core rule set rules, which are currently exported in JSON format. The task involves creating a parser that can read these JSON files and facilitate the auto-exporting of the rules. A link to the CRS project is mentioned for reference. Development of this parser is essential to streamline the import process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/196", @@ -5272,6 +5455,7 @@ "node_id": "I_kwDOF9wO7c5Imsfi", "title": " Make frontend show gap analysis", "body": "## Issue\r\nWe have a functional gap analysis feature in the backend, let's build a frontend page which can show it.\r\nThis will need significant thinking and design on presentation before we get to building\r\n", + "summary": "The issue involves creating a frontend page to display the existing functional gap analysis feature from the backend. It requires careful consideration and design regarding how the information will be presented before proceeding with the development. A design plan should be drafted to guide the implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/203", @@ -5300,6 +5484,7 @@ "node_id": "I_kwDOF9wO7c5ImtCH", "title": "Make frontned able to import from spreadsheet template.", "body": "## Issue\r\nchild of #194 , the trusted contributor import page should allow contributors to import from a spreadsheet template for mappings that don't have parsers yet", + "summary": "The issue discusses enhancing the frontend to enable contributors to import data using a spreadsheet template. This feature is aimed at facilitating the import process for mappings that currently lack parsers. To address this, the implementation of a user-friendly import function for the specified template is needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/204", @@ -5326,6 +5511,7 @@ "node_id": "I_kwDOF9wO7c5ImtM8", "title": "make pagination also for tag results", "body": "## Issue\r\nget by tag is not paginated, we don't have a lot of tags yet so for now its performant, but paginate it so that it's faster with more data", + "summary": "The issue addresses the lack of pagination for results retrieved by tags. 
While current performance is acceptable due to the limited number of tags, it is suggested to implement pagination to enhance efficiency as the data grows. The recommended action is to add pagination functionality for tag results.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/205", @@ -5352,6 +5538,7 @@ "node_id": "I_kwDOF9wO7c5LRihE", "title": "auto parse CWE", "body": "## Issue\r\n\r\n### **What is the issue?**\r\nCWE is a framework which is meant to be parsed automatically, however, we add the mappings manually from a spreadsheet.\r\nParse CWE from here https://cwe.mitre.org/data/downloads.html\r\n", + "summary": "The issue revolves around the need to automate the parsing of the Common Weakness Enumeration (CWE) framework instead of manually adding mappings from a spreadsheet. The proposed solution is to retrieve and parse the CWE data from the provided link. Action is required to implement this automated parsing process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/219", @@ -5380,6 +5567,7 @@ "node_id": "I_kwDOF9wO7c5XVVeu", "title": "Get User Feedback and perform usability research for the frontend", "body": "Our frontend works but that's about it. We are presenting quite a lot of data in a not very organised way.\r\nWe could benefit from data-presentation architecture.\r\n\r\nA good way to do this would be to talk to potential users and get feedback on what a good frontend would look like", + "summary": "The issue highlights the need for improved organization and presentation of data in the frontend. To enhance usability, it suggests conducting user feedback sessions to gather insights on what an effective frontend should entail. Engaging with potential users will help identify specific areas for improvement and inform the design of a more user-friendly interface. It is recommended to initiate user interviews or surveys to collect this valuable feedback.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/241", @@ -5406,6 +5594,7 @@ "node_id": "I_kwDOF9wO7c5XVVg9", "title": "Redesign frontend based on user feedback", "body": "after #241 is done we should action this user feedback by redesigning our frontend appropriately", + "summary": "The issue discusses the need to redesign the frontend in response to user feedback, which should be addressed after the completion of a related task (#241). Actioning the feedback will involve making appropriate changes to enhance the user interface.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/242", @@ -5432,6 +5621,7 @@ "node_id": "I_kwDOF9wO7c5XVWac", "title": "add filtering to the frontend", "body": "currently, the only way to do filtering is to know the exact url you need to input, which is not great ux, a good idea would be to add a drop down next to the search button that somehow allows constructing the filtering query from the frontend", + "summary": "The issue suggests enhancing the user experience by adding a filtering feature to the frontend. Currently, users must know the exact URL for filtering, which is not user-friendly. The proposed solution is to implement a dropdown next to the search button that allows users to construct filtering queries more intuitively. 
It would be beneficial to explore options for this dropdown implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/243", @@ -5458,6 +5648,7 @@ "node_id": "I_kwDOF9wO7c5XVWgP", "title": "Add gap analysis capability to the frontend", "body": "currently, our backend allows for performing gap analysis exercises between any two standards we support.\r\nwe should:\r\n1. revisit the implementation with some GRC people and validate that our implementation of gap analysis is correct.\r\n2. add a frontend feature that allows for gap analysis to be performed from the frontend", + "summary": "The issue suggests enhancing the frontend to include gap analysis capabilities, which are currently available only through the backend. The proposed steps include validating the existing gap analysis implementation with GRC experts and developing a frontend feature to enable users to perform gap analysis directly from the user interface.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/244", @@ -5486,6 +5677,7 @@ "node_id": "I_kwDOF9wO7c5ktiub", "title": "Create an API endpoint which given a Standard JSON, suggests which CREs are the most likely mappings for this standard section", "body": "As an opencre or standard maintainer, I want to be able to import new standards or other security information very quickly in the CRE graph.\r\nGiven I have this information in a [CRE Standard](https://github.com/OWASP/common-requirement-enumeration/blob/main/application/defs/cre_defs.py#L384) format, I need to find which CRE is best suited for linking with a specific section of my standard.\r\nIn order to do that I need an API call POST [opencre.org/rest/v1/link/suggest](http://opencre.org/rest/v1/link/suggest) where the body of the api call is a CRE Standard in json format.\r\nOpenCRE then needs to reply with a json array of CREs that are best suited to match my standard.\r\ne.g.\r\n POST [opencre.org/rest/v1/link/suggest](http://opencre.org/rest/v1/link/suggest)\r\n\r\n```{\"doctype\":\"Standard\",\"hyperlink\":\"https://github.com/OWASP/ASVS/blob/v4.0.2/4.0/en/0x11-V2-Authentication.md\",\"links\":[],\"name\":\"ASVS\",\"section\":\"Verify that the challenge nonce is at least 64 bits in length, and statistically unique or unique over the lifetime of the cryptographic device.\",\"sectionID\":\"V2.9.2\"}]```\r\n\r\nResponse:\r\n\r\n``` [{\"doctype\":\"CRE\",\"id\":\"287-251\",\"name\":\"Use a unique challenge nonce of sufficient size\",\"tags\":[\"Cryptography\"]},{\"doctype\":\"CRE\",\"id\":\"287-252\",\"name\":\"Some other CRE\",\"tags\":[\"Cryptography\"]}] ```\r\n\r\n", + "summary": "The issue requests the creation of an API endpoint that takes a standard JSON object and suggests the most suitable Common Requirement Enumerations (CREs) for a specific section of that standard. The endpoint should be a POST request to `opencre.org/rest/v1/link/suggest`, with the body containing the CRE Standard in JSON format. The expected response is a JSON array of recommended CREs that align with the provided standard section. 
Implementation of this functionality would facilitate quicker integration of new standards into the CRE graph.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/275", @@ -5512,6 +5704,7 @@ "node_id": "I_kwDOF9wO7c5k3WTA", "title": "By default: collapse linked sources if they contain more than x (say 5) entries", "body": "By default collapsed linked sources if they contain more than x (say 5) entries\r\nTo avoid clutter", + "summary": "The issue suggests implementing a feature to automatically collapse linked sources when they contain more than a specified number of entries (e.g., 5) to reduce visual clutter. A possible action would be to set up a default threshold for collapsing these sources within the application.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/283", @@ -5544,6 +5737,7 @@ "node_id": "I_kwDOF9wO7c5l1vzb", "title": "create api call which takes a standard object and suggests which CRE it should match to", "body": "currently adding mappings requires the new standard to either map to an existing standard or manual mapping.\r\nthis is inefficient, we can use embeddings to automatically find which CRE each standard section links to .\r\n\r\ndesign an api call which when you POST a standard document with it's section content, it suggests which CRE it should map to\r\n\r\n(preparation for ability to add mappings)", + "summary": "The issue discusses the need to create an API call that takes a standard object and suggests the appropriate CRE (Common Regulatory Element) it should match to. Currently, the process of adding mappings is inefficient, requiring either a link to an existing standard or manual input. The proposal involves utilizing embeddings to automatically identify connections between standard section content and CREs. The task is to design an API endpoint that, when a standard document is posted, will provide suggestions for mapping to the relevant CRE. This development is crucial for streamlining the mapping process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/285", @@ -5574,6 +5768,7 @@ "node_id": "I_kwDOF9wO7c5oZWJq", "title": "create swagger or .proto definition api", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nas opencre.org is getting adoption we receive an increasing number of requests for SDKs, Clients or other API integrations.\r\nWe currently do not have any way of integrating OpenCRE with the outside world other than its undocumented API or the frontend. If we document our API we will be able to use Open APIs code generation capabilities to create clients.\r\n\r\n### Expected Behaviour\r\n\r\nThere is a formal definition of our api in either .proto or swagger files.\r\nThese files can be used to generate clients.\r\n", + "summary": "The issue highlights the need for a formal definition of the OpenCRE API, as the current undocumented API limits external integration and client generation. To address this, the suggestion is to create either Swagger or .proto files for the API. This documentation will enable the generation of SDKs and clients, facilitating improved adoption and integration with OpenCRE. 
\n\nNext steps involve defining the API specifications in the chosen format (Swagger or .proto).", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/295", @@ -5605,6 +5800,7 @@ "node_id": "I_kwDOF9wO7c5rqXY7", "title": "[BUG] pci dss repeats section id in section", "body": "the parser for the pci dss standard repeats the section id in the section name field", + "summary": "There is a bug in the PCI DSS parser where the section ID is being duplicated in the section name field. To resolve this issue, the parsing logic should be reviewed and adjusted to ensure that the section ID is not repeated in the output.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/328", @@ -5635,6 +5831,7 @@ "node_id": "I_kwDOF9wO7c5s4wVF", "title": "[Feature request] Create a flexible endpoint for documents", "body": "**Current behaviour**\r\n\r\nThere is currently an endpoint `/rest/v1/id/xxx-xxx` in the application that is responsible for a particular CRE instance. Such an instance has a unique identifier, which is a pair of three-digit numbers separated by a dash. Within the body of this instance there are links to other CREs and documents. If we want to get any linked CRE instance, we can easily get it using the above mentioned endpoint via its id.\r\n\r\n**Problem**\r\n\r\nThis approach allows us to easily move from parent to child CRE instances and vice versa, but it doesn't include other many-to-many relationships, i.e. between CRE and linked document. It doesn't allow us to get a lot of important information, such as\r\n- documents of a certain type for a given CRE\r\n- all CREs linked to this document\r\netc.\r\n\r\n**Feature Request**\r\n\r\nUsing queryset parameters, we could easily solve both problems with the following requests:\r\n`/rest/v1/document/?CRE=xxx-xxx` solves the first problem and\r\n`/rest/v1/document/?CRE=xxx-xxx&doctype=Standard` solves the second. \r\n\r\nSo the request is to combine all documents within an endpoint and add the ability to filter document instances using the queryset parameter.\r\nThis would solve the problem mentioned in the https://github.com/OWASP/common-requirement-enumeration/issues/285 ticket and add other important functionality. ", + "summary": "The issue discusses the limitations of the current endpoint `/rest/v1/id/xxx-xxx`, which allows for easy navigation between CRE instances but does not support many-to-many relationships with linked documents. It highlights the need for a more flexible endpoint that can provide access to documents related to a specific CRE as well as the CREs associated with a particular document. \n\nThe proposed solution is to create a new endpoint `/rest/v1/document/` that utilizes queryset parameters to filter documents by CRE and document type. 
Implementing this feature would enhance functionality and address the concerns raised in a related issue.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/334", @@ -5661,6 +5858,7 @@ "node_id": "I_kwDOF9wO7c5w-BEQ", "title": "Better inform users on chatbot privacy", "body": "Better inform users on chatbot privacy -\r\nwhile users log in, before they get shown the login, and when using it should be clear that:\r\n-we only need them to login to maximize the number of queries per minute per unique user\r\n-their account is not used to authenticate with the Large Language Model\r\n-only their prompt is sent\r\n-see the info on the chatbot page on this\r\n-maybe we should link to the privacy policy of the PALM LLM\r\n-we also need to update our privacy policy to reflect this info. The google SSO refers to it\r\n\r\nIt's best to first show a page when people arrive at /chatbot and have no session: describing the above, saying welcome to OpenCRE chat, and then a link to login, taking you to google SSO.\r\n\r\nSo basically a piece of text to show on that landing page, in the privacy policy and on the chatbot page.\r\n\r\nThen somehow we need to deal with what google says in the SSO: \"To continue, Google will share your name, email address, language preference, and profile picture with opencre.org\" Either we need to change some settings, change that text, or refer to it in OUR text: despite that google sends us your name and mail address, we don't store it. Preferably we pick an SSO method that does not send it al all, or rather an alternative to google sso?", + "summary": "The issue highlights the need for improved user communication regarding chatbot privacy. It suggests that users should be informed about the login process and the data being handled, specifically that login is only for query management, no user account data is used for authentication with the Large Language Model, and only user prompts are sent. Recommendations include creating a dedicated landing page for first-time users, updating the privacy policy, and linking to the PALM LLM privacy policy. Additionally, the issue raises concerns about the information shared by Google during the SSO process and suggests considering alternative authentication methods that do not share personal data. To address these points, a clear informational text should be developed for the landing page, chatbot page, and privacy policy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/375", @@ -5689,6 +5887,7 @@ "node_id": "I_kwDOF9wO7c5xBKXd", "title": "Have a separate UI element to enter language request, to prevent requests in the prompt to bother the matching", "body": "Have a separate UI element to enter language request, to prevent requests in the prompt to bother the matching.\r\nIf you ask 'Answer in Chinese how I should prevent command injection' you get a worse opencre match than when you ask 'How should I prevent command injection'.\r\nProbably because we calculate the embedding over the entire user prompt.\r\nWe can do two things:\r\n1. add a text box with the label 'Language of Answer' and put English in it by default, then use that in constructing the final prompt\r\n2. add a text box with the label 'Instructions' and put there by default 'Anwer in English'. People can change that to 'Answer in layman terms in German' for example. 
This is the nicest I believe", + "summary": "The issue discusses the need for a separate UI element to specify language requests in order to avoid interfering with the main prompt used for matching. Currently, including language requests within the prompt negatively impacts the quality of the matching. The proposed solutions include adding a dedicated text box labeled 'Language of Answer', defaulting to English, or creating a text box labeled 'Instructions' with a default prompt of 'Answer in English', allowing users to customize their requests. Implementing one of these solutions could improve the matching process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/376", @@ -5717,6 +5916,7 @@ "node_id": "I_kwDOF9wO7c5xN7v9", "title": "List supported standards and documents", "body": "## Issue\r\n[From Sam Stepanyan]\r\nIt would be convenient to know which standards and in general documents are supported by opencre\r\nthis helps both with chatbot queries but also general knowledge of what OpenCRE supports\r\n\r\nImplementation wise this is a simple query to select unique names from nodes and a frontend page to present this somehow", + "summary": "The issue suggests the need for a comprehensive list of supported standards and documents by OpenCRE to enhance user knowledge and assist with chatbot queries. It proposes a straightforward implementation involving a query to select unique names from nodes, followed by the creation of a frontend page for presentation. To move forward, the team should focus on developing this query and designing the frontend display.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/383", @@ -5743,6 +5943,7 @@ "node_id": "I_kwDOF9wO7c5zHX82", "title": "Create a Product Security Wayfinder component", "body": "## Issue\r\nWe created the first Wayfinder manually here https://owasp.org/projects/\r\nIt's cool, the community has embraced it but it's manual and currently sits in a shared drive.\r\n\r\n\r\n### Expected Behaviour\r\n\r\nAs an OpenCRE.org user I can browse a graphical way of exploring tools and standards known to OpenCRE by grouping them on which part of the SDLC they map to. Similar to the wayfinder above.\r\n\r\nI can filter the new wayfinder by selecting supporting organizations, licenses or any other Key from the metadata fields.\r\n\r\n### Actual Behaviour\r\n\r\nWe have the data to automate the creation of the wayfinder, let's do so.\r\n\r\nTo achieve this we need to\r\n* grab unique names from every non-cre node in our database \r\n* enrich them with metadata such as SDLC steps they fit into\r\n* find a way to automatically provide this info on import (e.g. by modifying our importers or creating a static map to start and then require projects to add this info in their self contained links)\r\n* present this info in a frontend in a nice way allowing for filtering based on supporting org, license, and other metadata we may have\r\n", + "summary": "The issue discusses the need to create an automated Product Security Wayfinder component, which would enhance the current manual version hosted on a shared drive. The expected functionality includes a graphical interface for users to explore tools and standards related to OpenCRE, organized by their relevance in the Software Development Life Cycle (SDLC). 
Users should be able to filter options based on metadata such as supporting organizations and licenses.\n\nTo implement this, the following steps are proposed:\n- Extract unique names from non-CRE nodes in the database.\n- Enrich this data with relevant metadata.\n- Automate the information import process, possibly by modifying existing importers or creating a static initial mapping.\n- Develop a user-friendly frontend to present the data and allow for filtering based on various metadata attributes.\n\nThe next steps should focus on gathering the necessary data and developing the automated processes for the Wayfinder.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/426", @@ -5769,6 +5970,7 @@ "node_id": "I_kwDOF9wO7c52k3nR", "title": "Mapping issue: NIST SSDF to NIST SP 800-53", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nThe mapping from NIST SSDF PO.1.2 (Identify and document all security requirements) to SP800-53 gives SC-18 Mobile Code as the only Direct mapping. This doesn't seem correct.\r\n\r\n### Expected Behaviour\r\n\r\nI don't have extensive knowledge of 800-53, but I would think SA-8 is a closer match for instance.\r\n", + "summary": "The issue raised concerns the mapping of NIST SSDF PO.1.2, which focuses on identifying and documenting all security requirements, to NIST SP 800-53. Currently, SC-18 Mobile Code is the only direct mapping provided, which is being questioned as potentially incorrect. The expectation is that SA-8 might be a more appropriate match. \n\nTo address this, a review of the mappings between SSDF and SP 800-53 is necessary to ensure accuracy and completeness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/446", @@ -5797,6 +5999,7 @@ "node_id": "I_kwDOF9wO7c52q6ox", "title": "Mapping issue: Missing a reference to the OWASP DoS cheat sheet", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nWhen I type \"denial of service\" in the search field on opencre.org I cannot find a reference to the OWASP Denial of Service cheat sheet - https://cheatsheetseries.owasp.org/cheatsheets/Denial_of_Service_Cheat_Sheet.html\r\n\r\n### Expected Behaviour\r\n\r\nThe OWASP Denial of Service cheat sheet link should appear as a reference.\r\n\r\n### Actual Behaviour\r\n\r\nSeveral other references appear on the results page (e.g. ASVS, other OWASP cheat sheets, CAPEC, NIST, CWE, etc) but no link to the OWASP DoS cheat sheet.\r\n\r\n### Steps to reproduce\r\n\r\nVisit opencre.org and type in \"denial of service\" in the search field and then click Search.", + "summary": "A mapping issue has been identified regarding the absence of a reference to the OWASP Denial of Service (DoS) cheat sheet when searching for \"denial of service\" on opencre.org. Currently, other relevant references appear in the search results, but the specific link to the OWASP DoS cheat sheet is missing. To address this issue, the OWASP DoS cheat sheet should be added to the search results to ensure it is accessible to users.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/449", @@ -5823,6 +6026,7 @@ "node_id": "I_kwDOF9wO7c53ZL51", "title": "Use 'Requirement' instead of CRE", "body": "New visitors don't know what a CRE is. Besides it's common requirement ENUMERATION. So it should in fact read CR. Let's call it Requirement everywhere. 
To start with at the places where we say \"Which contains CREs\" and \"Which is related to CREs\"", + "summary": "The issue suggests replacing the term \"CRE\" with \"Requirement\" for clarity, as new visitors may not understand what CRE means. It proposes to use \"Requirement\" consistently throughout the documentation, starting with locations that reference \"Which contains CREs\" and \"Which is related to CREs.\" The necessary action includes updating these specific phrases in the documentation to reflect the new terminology.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/453", @@ -5849,6 +6053,7 @@ "node_id": "I_kwDOF9wO7c537Y2P", "title": "Add arguments to cre endpoint to support the standard integration and metrics", "body": "When a standard like WSTG links to OpenCRE's, we at least can ask them to add an argument saying 'source=bla' where bla needs to be some recognizable name for the standard - perhaps an encoded name, or something else that may be less likely to change over time.\r\nThat way we can keep count.\r\nAlso: do we need more info that will help us in parsing? I guess not because for WSTG we'll use the content of the page to parse the title and reconstruct the hyperlink, right?", + "summary": "The issue suggests enhancing the 'cre' endpoint by adding an argument for tracking integration with standards like WSTG. Specifically, it proposes including a 'source' parameter with a recognizable name for the standard to facilitate counting. Additionally, it questions whether more information is needed for parsing, noting that for WSTG, the page content will be used to parse the title and reconstruct hyperlinks. To address this issue, the team should consider implementing the 'source' argument and evaluate if additional parsing information is necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/455", @@ -5877,6 +6082,7 @@ "node_id": "I_kwDOF9wO7c55zvZ8", "title": "Map Analysis UI Clunkiness", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nWhen browsing a list of standards that map to a single standard and hovering over one, the info box that pops up covers the next few in the list. This makes quickly browsing through standards in the same section clunky as you need to move the mouse away until the box disappears before you can hover over the next standard. \r\n\r\n![image](https://github.com/OWASP/OpenCRE/assets/10589589/147e15fb-20db-4882-85f4-b0851fc13ee8)\r\n\r\n### Expected Behaviour\r\n\r\nIdeally the hover-over info box would be offset to the right/left so that the following or preceding standards aren't covered.\r\n\r\n### Actual Behaviour\r\n\r\nDescribed above.\r\n\r\n### Steps to reproduce\r\n\r\nDescribed above.", + "summary": "The issue pertains to the clunkiness of the Map Analysis UI, specifically regarding the hover-over info box that appears when browsing standards. The current implementation causes the info box to obstruct subsequent items in the list, hindering quick navigation. The expected behavior is for the info box to be offset to prevent it from covering adjacent standards. To resolve this, adjustments need to be made to the positioning of the hover-over info box.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/456", @@ -5905,6 +6111,7 @@ "node_id": "I_kwDOF9wO7c57bO9U", "title": "Add CWE titles", "body": "Currently we only have CWE ids. 
For users it is often handy to see what weakness is linked without having to click on it. In Mapanalysis especially.\r\nWe have titles (Sectionnames) for many standards.\r\nI think we used to have the titles.\r\n", + "summary": "The issue highlights the need to add Common Weakness Enumeration (CWE) titles alongside the existing CWE IDs to enhance user experience, particularly in the Mapanalysis tool. It suggests that having the titles readily visible would eliminate the need for users to click to find out what each weakness refers to. It is also noted that titles for many standards are already available, implying that reinstating the CWE titles should be feasible. To address this, the implementation of CWE titles should be considered.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/468", @@ -5931,6 +6138,7 @@ "node_id": "I_kwDOF9wO7c57bPiM", "title": "Add OpenCRE itself to mapanalysis", "body": "It would be good to have opencre be one of the standards in the map analyis. it can be helpful to have a opencre-samm overview for example. \r\n", + "summary": "The issue suggests incorporating OpenCRE into the map analysis framework, highlighting its potential as a standard tool. It specifically mentions the need for an overview of OpenCRE in relation to SAMM. To address this, the implementation of OpenCRE within the map analysis system is recommended.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/469", @@ -5959,6 +6167,7 @@ "node_id": "I_kwDOF9wO7c57bP3e", "title": "Return hyperlinks from mapanalysis REST endpoint", "body": "See title", + "summary": "The issue requests the implementation of returning hyperlinks from the mapanalysis REST endpoint. To address this, the development team should consider modifying the existing endpoint to include the necessary hyperlink data in its response.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/470", @@ -5985,6 +6194,7 @@ "node_id": "I_kwDOF9wO7c57bQO0", "title": "Mapping: to more owasp resources", "body": "Link to resources such as https://owasp.org/www-community/attacks/Path_Traversal ", + "summary": "The issue suggests enhancing the current documentation by adding links to more OWASP resources, specifically mentioning the Path Traversal attack page. It would be beneficial to identify additional relevant OWASP resources to include in the mapping for improved guidance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/471", @@ -6013,6 +6223,7 @@ "node_id": "I_kwDOF9wO7c57bQhL", "title": "Mapping: Map to the rest of CWE", "body": "6Could we map the rest of CWE, such as 652-query injection linking to injection?", + "summary": "The issue discusses the need to map additional Common Weakness Enumeration (CWE) entries, specifically suggesting the inclusion of CWE-652 (query injection) under the broader category of injection vulnerabilities. The action required involves expanding the current mapping to ensure all relevant CWEs are properly linked.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/472", @@ -6041,6 +6252,7 @@ "node_id": "I_kwDOF9wO7c5-pggV", "title": "Make smartlink go to CRE directly", "body": "If you smartlink for example with CWE 611 and there's only one CRE connected, it should jump straight to that CRE and show the wealth of information. 
That CRE page has all the information that the current smartlink landing page has, so.\r\n[https://www.opencre.org/smartlink/standard/CWE/611](https://www.opencre.org/smartlink/standard/CWE/611)", + "summary": "The issue requests that the smartlink feature be updated so that when there is only one connected CRE (Common Related Entity) for a specific CWE (Common Weakness Enumeration), it should redirect users directly to that CRE's page. The current landing page for smartlinks contains the same information as the CRE page, making this direct navigation more efficient. To resolve this, the functionality for smartlink should be modified to check the number of connected CREs and redirect accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/486", @@ -6069,6 +6281,7 @@ "node_id": "I_kwDOF9wO7c6BB3rY", "title": "Try alternative graph database", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nGap analysis calculation takes a few hours if we run it on a beefy machine. The bottleneck is neo4j being slow.\r\nWe should investigate alternatives.\r\nAn alternative that seems promising and works with our existing database is [Apache Age](https://age.apache.org/age-manual/master/intro/setup.html)\r\n\r\nAge runs on top of our current database (postgres) and allows for Cypher queries on top of postgres.\r\nI'm being told it is faster than neo", + "summary": "The issue highlights that the gap analysis calculation is significantly slow when using Neo4j, taking several hours on a powerful machine. As a potential solution, the suggestion is to explore alternatives, particularly Apache Age, which operates on PostgreSQL and supports Cypher queries. It is reported to be faster than Neo4j. The next step involves investigating the feasibility and performance of implementing Apache Age as a replacement for Neo4j to improve the calculation speed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/488", @@ -6098,6 +6311,7 @@ "node_id": "I_kwDOF9wO7c6Cg4z-", "title": "Numpy 1.23 does not support python-3.11", "body": "\r\n### Numpy and Python version mismatch\r\n\r\nNumpy 1.23 does not support python 3.11 so when running `pip install -r requirements.txt` the process fails.\r\nDocs: https://numpy.org/devdocs/release/1.23.1-notes.htmlhttps://numpy.org/devdocs/release/1.23.1-notes.html (the 1.23.0 notes don't mention explicitly that supported version but I don't expect it to support more than the 1.23.1 version)\r\n\r\n### Expected Behaviour\r\n\r\nCan install packages with recommended Python version.\r\n\r\n### Actual Behaviour\r\n\r\n```\r\npip install -r requirements.txt\r\n...\r\nnote: This error originates from a subprocess, and is likely not a problem with pip.\r\n ERROR: Failed building wheel for numpy\r\nFailed to build numpy\r\nERROR: Could not build wheels for numpy, which is required to install pyproject.toml-based projects\r\n```\r\n\r\n### Steps to reproduce\r\n\r\nCreate a fresh python venv with python 3.11.7 and then run:\r\n```\r\npip install -r requirements.txt\r\n```", + "summary": "There is an issue with Numpy 1.23, as it does not support Python 3.11. When attempting to run `pip install -r requirements.txt`, the installation fails due to an inability to build the Numpy wheel, resulting in error messages related to the subprocess. The expected behavior is to install the packages compatible with the recommended Python version. 
To resolve this, it is suggested to either downgrade Python to a supported version or upgrade Numpy to a version that is compatible with Python 3.11.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/490", @@ -6124,6 +6338,7 @@ "node_id": "I_kwDOF9wO7c6FB7lJ", "title": "[explorer] add graph debugging information", "body": "## Issue\r\n\r\nboth the original explorer and the cre version do not show WHY the graph is the shape it is, for the sake of debugging we should be albe to know why the graph has the specific shape it has (which are the root cres, the ones after that etc)", + "summary": "The issue highlights the lack of debugging information in both the original explorer and the cre version regarding the shape of the graph. It suggests that users should have access to details explaining the reasons behind the graph's specific configuration, including information about root nodes and subsequent elements. To address this, the implementation of a feature that provides insights into the graph structure for debugging purposes is recommended.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/493", @@ -6150,6 +6365,7 @@ "node_id": "I_kwDOF9wO7c6FB8Ap", "title": "[explorer] improve graph visuals", "body": "## Issue\r\n\r\nimprove the CRE graph visuals by making them as responsive and cool as the originals", + "summary": "The issue discusses the need to enhance the visuals of the CRE graph to ensure they are as responsive and visually appealing as the original versions. To address this, improvements in design and responsiveness should be implemented.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/494", @@ -6176,6 +6392,7 @@ "node_id": "I_kwDOF9wO7c6GDN0H", "title": "[api] add ability to export results as CycloneDX attestations", "body": "## Issue\r\n\r\n### **What is the issue?**\r\nCycloneDX supports attestations. This is their way to do standards and they have native CRE support in their attestations.\r\n\r\nTime for us to also support CycloneDX\r\n\r\nFind a way to make OpenCRE dump attestation documents https://cyclonedx-python-library.readthedocs.io/en/latest/autoapi/cyclonedx/model/index.html#cyclonedx.model.ExternalReferenceType.ATTESTATION\r\n\r\nhttps://github.com/CycloneDX/guides/blob/main/Attestations/en/0x30-Making-Attestations.md", + "summary": "The issue highlights the need to add functionality for exporting results as CycloneDX attestations, which are a part of the CycloneDX standard. The goal is to implement support for this format to enable OpenCRE to generate attestation documents. Reference materials include the CycloneDX Python library documentation and a guide on making attestations. To address this, the development team should explore how to integrate CycloneDX's attestation capabilities into the existing system.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/499", @@ -6204,6 +6421,7 @@ "node_id": "I_kwDOF9wO7c6JGXzk", "title": "Prune map analysis search to save time and memory", "body": "Map analysis takes days to calculate and gigabytes to store, but we're only interested in the top matches from standard to standard. It's no use to find 100 weakly linked topics if you have a couple of direct links and a number of strong links.\r\nThe idea therefore is to prune the search we do for map analysis, by stopping search early if we know that we're looking\r\n1. design a strategy for pruning. 
First stab: don't search for average and weak links if we have direct and strong links. Don't search for weak links if we have direct, strong or average links'.\r\n2. update the search algorithm. This will require some smartness, depending on how it is implemented. \r\n\r\nBy the way, I notice we don't seem to have weak links anymore: should we change our categorization and make 3-6 average and 7+ weak?\r\n\r\nFirst step: find expertise in Neo4J. Rob ", + "summary": "The issue highlights the inefficiencies in the current map analysis process, which takes an excessive amount of time and storage space due to the inclusion of less relevant link types. The proposal suggests implementing a pruning strategy to optimize the search. This includes:\n\n1. Designing a pruning strategy to avoid searching for average and weak links when direct and strong links are present.\n2. Updating the search algorithm to incorporate this strategy effectively.\n\nAdditionally, there is a consideration of redefining link categories, as weak links may no longer be applicable. The first action suggested is to consult with an expert in Neo4J to assist with these changes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/506", @@ -6232,6 +6450,7 @@ "node_id": "I_kwDOF9wO7c6L31xX", "title": "Explorer is incomplete", "body": "opencre.com/explorer is missing a lot of branches, for example everything under input and output protection.\r\nIf adding those branches makes building the tree up slower, the build up probably will need to be optimized. ", + "summary": "The Explorer on opencre.com is lacking several branches, particularly those related to input and output protection. To address this, it is suggested that these branches be added, and if their inclusion slows down the tree-building process, optimization may be necessary to improve performance.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/OpenCRE/issues/514", @@ -6263,6 +6482,7 @@ "node_id": "I_kwDOF9wO7c6OrUtU", "title": "fix e2e tests", "body": "## Issue\r\n\r\n### **What is the issue?**\r\ne2e tests are failing, we should fix them\r\n", + "summary": "The end-to-end (e2e) tests are currently failing and require attention to ensure they function correctly. The task involves diagnosing the issues causing the failures and implementing fixes accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/523", @@ -6295,6 +6515,7 @@ "node_id": "I_kwDOF9wO7c6P2GHC", "title": "cp: cannot stat 'cres/db.sqlite': No such file or directory", "body": "## Issue\r\ncp: cannot stat 'cres/db.sqlite': No such file or directory\r\n\r\n### **What is the issue?**\r\n\r\nI am new to the project and trying to get it installed on my Ubuntu machine. \r\n\r\nHave cloned the repository and on step 2 which states:\r\n**Copy sqlite database to required location\r\ncp cres/db.sqlite standards_cache.sqlite**\r\n\r\nI am getting this error: **cp: cannot stat 'cres/db.sqlite': No such file or directory**\r\n\r\nI have searched the repository for db.sqlite but have not been able to locate it.", + "summary": "The issue reported involves an error when attempting to copy a SQLite database file during the installation process of a project. The user is encountering the message \"cp: cannot stat 'cres/db.sqlite': No such file or directory\" after following the installation steps, specifically when trying to copy the file from the 'cres' directory to 'standards_cache.sqlite'. 
\n\nIt seems that the required 'db.sqlite' file is missing from the repository. To resolve this, it may be necessary to check if the file should be included in the repository or if there are any instructions on how to generate or obtain this database file.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/527", @@ -6321,6 +6542,7 @@ "node_id": "I_kwDOF9wO7c6P3CKZ", "title": "zeljkoobrenovic.github.io - reported as unsafe", "body": "## Issue\r\n\r\nThis site has been reported as unsafe\r\nHosted by zeljkoobrenovic.github.io\r\nMicrosoft recommends you don't continue to this site. It has been reported to Microsoft for containing phishing threats which may try to steal personal or financial information.\r\n\r\n### **What is the issue?**\r\n\r\nWhen clicking \"Explore our catalog in one list\" on the homepage that links to this page: [zeljkoobrenovic.github.io](url) am getting a Microsoft warning page.\r\n\r\n### Expected Behaviour\r\n\r\nWhat should have happened?\r\n\r\nShould link to a safe page.\r\n\r\n### Actual Behaviour\r\n\r\nWhat actually happened?\r\n\r\nLinks to a page that has been flagged as containing phishing threats.\r\n\r\n### Steps to reproduce\r\n\r\nHow can we reproduce the error?\r\n\r\n- Go to homepage of opencre\r\n- Click **Explore**", + "summary": "The issue reported involves the site zeljkoobrenovic.github.io being flagged as unsafe due to phishing threats, specifically when users click on the \"Explore our catalog in one list\" link on the homepage. Users receive a warning from Microsoft advising against continuing to the site. The expected behavior is for the link to direct to a safe page, but instead, it leads to a flagged page. \n\nTo resolve this issue, it is necessary to investigate the content of the linked page and address any potential security concerns to ensure it is safe for users.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/OpenCRE/issues/528", @@ -6351,6 +6573,7 @@ "node_id": "I_kwDOF9wO7c6QU3Ea", "title": "make `--upstream_sync` also sync the gap analysis graph progressively", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nCurrent gap analysis graph is 9Gb which makes it very hard to download reliably when running locally (also you shouldn't need 9Gb of data to run the project)\r\n\r\n### Expected Behaviour\r\n\r\nUpstream sync should either download the data necessary in order to run(graph structure).\r\nFor the Gap Analysis graph it should either grab it from a place suitable for this sort of data (e.g. google public dataset) or it should load it from upstream opportunistically and then save it in the local database\r\n", + "summary": "The issue addresses the challenge of managing the large 9GB gap analysis graph, which complicates local downloads. The expected behavior is for the `--upstream_sync` command to facilitate a more efficient download process by either obtaining the necessary data from a more suitable source, like a Google public dataset, or by opportunistically loading it from upstream and saving it in the local database. To resolve this, modifications to the syncing process may be needed to enhance data management and reduce the local storage burden.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/534", @@ -6381,6 +6604,7 @@ "node_id": "I_kwDOF9wO7c6Rf2Oi", "title": "Explorer search removes all standards", "body": "The amazing new native explorer looks great. 
I observed that when you enter a search term such as session, that the linked standards are no longer displayed. I doubt this is by design. Can it be fixed?", + "summary": "The issue reported concerns the native explorer's search functionality, which fails to display linked standards when a search term is entered. It seems this behavior is unintended, and a fix is needed to ensure that standards remain visible during searches.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/544", @@ -6410,6 +6634,7 @@ "node_id": "I_kwDOF9wO7c6SOSiO", "title": "Whitespaces in URLs from Chatbot - 'You can find more information about this section of CWE on its OpenCRE page'", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nWhen clicking from the Chatbot the following link white spaces appear in the URL: 'You can find more information about this section of CWE on its OpenCRE page'\r\n\r\n### Expected Behaviour\r\n\r\nWhat should have happened?\r\n\r\nHyphens should be used.\r\n\r\n### Actual Behaviour\r\n\r\nWhat actually happened?\r\n\r\nWhite spaces in URL result in %20 and could cause issues with different browsers and a potentially poor user experience.\r\n\r\n### Steps to reproduce\r\n\r\nHow can we reproduce the error?\r\n\r\nAsk the chatbot a question and click on 'You can find more information about this section of CWE on its OpenCRE page' at the bottom of the response.", + "summary": "The issue reported involves the presence of white spaces in URLs generated by the Chatbot, specifically in the link text: 'You can find more information about this section of CWE on its OpenCRE page'. Instead of using hyphens, the white spaces are encoded as %20, which may lead to compatibility issues across different browsers and negatively impact user experience. \n\nTo resolve this, the link formatting should be adjusted to replace white spaces with hyphens in the URLs.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/545", @@ -6436,6 +6661,7 @@ "node_id": "I_kwDOF9wO7c6SXAXN", "title": "ISO control numbers interpreted wrongfully", "body": "Spyros we have a recurring problem with the ISO numbers. there are two 7.1's and two 8.1's etc. I was pointed to it by a user. Last time this happened because 7.10 was interpreted as a value and was turned into 7.1. It should be treated as a string. I recall you fixing this.\r\nGood to see that folks are noticing.", + "summary": "The issue involves the incorrect interpretation of ISO control numbers, specifically with versions like 7.1 and 8.1 being duplicated due to numerical misinterpretation (e.g., 7.10 being treated as 7.1). This misinterpretation needs to be addressed by ensuring that these numbers are treated as strings rather than numeric values. 
A potential solution may involve revisiting previous fixes that addressed similar problems.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/546", @@ -6464,6 +6690,7 @@ "node_id": "I_kwDOF9wO7c6Tszn8", "title": "python cre.py --upstream_sync - ModuleNotFoundError: No module named 'click'", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nAfter **make install** should there be an instruction to run **pip install -r requirements.txt** ?\r\n\r\n### Expected Behaviour\r\n\r\nWhat should have happened?\r\n\r\nRequirements should be installed and the command **python cre.py --upstream_sync** should not produce an error.\r\n\r\n### Actual Behaviour\r\n\r\nWhat actually happened?\r\n\r\nWhen running \r\n\r\nGetting: **ModuleNotFoundError: No module named 'click'**\r\n\r\nHow can we reproduce the error?\r\n\r\nSet up a new project and follow steps as outlined in README", + "summary": "The issue reported involves a `ModuleNotFoundError` for the 'click' module when executing the command `python cre.py --upstream_sync` after performing a `make install`. The expected behavior is that all necessary requirements should be installed automatically, preventing such errors. However, it seems that the required packages may not have been installed, leading to the error when running the command. \n\nTo resolve this, it may be necessary to include an instruction to run `pip install -r requirements.txt` after the `make install` step in the setup documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/551", @@ -6486,10 +6713,11 @@ "pk": 228, "fields": { "nest_created_at": "2024-09-11T19:31:56.826Z", - "nest_updated_at": "2024-09-12T00:16:07.817Z", + "nest_updated_at": "2024-09-13T17:00:20.264Z", "node_id": "I_kwDOF9wO7c6VpBSs", "title": "[Import] Existing graph contains cycle", "body": "## Issue\r\n\r\n### **What is the issue?**\r\n\r\nAn error happens when trying to import a mapping after populating the database from upstream \r\n\r\n### Expected Behaviour\r\n\r\nWhen I run the import on an empty database, it imports correctly\r\n\r\nCommand to clean the database:\r\n```\r\n~/OpenCRE$ rm -rf standards_cache.sqlite; make migrate-upgrade\r\n```\r\n\r\nCommand to start the import:\r\n```\r\n~$ curl -X POST http://CRE-LOCAL-SERVER:5000/rest/v1/cre_csv_import -F \"cre_csv=@cra_cre.csv\"\r\n{\"new_cres\":[\"347-352\",\"028-254\",\"820-878\",\"007-274\",\"286-500\",\"732-148\",\"002-202\",\"841-757\",\"240-274\",\"261-010\",\"766-162\",\"731-120\",\"227-045\",\"571-640\"],\"new_standards\":7,\"status\":\"success\"}\r\n```\r\n\r\nResult from the server side:\r\n```\r\nINFO:application.cmd.cre_main:Registering resource CSA CCM of length 1\r\nINFO:application.database.db:knew of node CSA CCM:IVS-06:Vulnerability Remediation:https://cloudsecurityalliance.org/research/cloud-c\r\n controls-matrix/ ,updating\r\nINFO:werkzeug:161.218.188.108 - - [06/Sep/2024 10:13:14] \"POST /rest/v1/cre_csv_import HTTP/1.1\" 200 -\r\n```\r\n\r\n### Actual Behaviour\r\n\r\nCommand to clean the database and populate from upstream:\r\n```\r\n~/OpenCRE$ rm -rf standards_cache.sqlite; make migrate-upgrade; python cre.py --upstream_sync\r\n```\r\n\r\nCommand to start the import:\r\n```\r\n~$ curl -X POST http://CRE-LOCAL-SERVER:5000/rest/v1/cre_csv_import -F \"cre_csv=@cra_cre.csv\"\r\n\r\n\r\n500 Internal Server Error\r\n

Internal Server Error
The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.

\r\n```\r\n\r\nResult from the server side:\r\n```\r\n return cors_after_request(app.make_response(f(*args, **kwargs)))\r\n File \"/home/csi/OpenCRE/venv/lib/python3.10/site-packages/flask/app.py\", line 880, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/home/csi/OpenCRE/venv/lib/python3.10/site-packages/flask/app.py\", line 865, in dispatch_request\r\n return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args) # type: ignore[no-any-return]\r\n File \"/home/csi/OpenCRE/application/web/web_main.py\", line 743, in import_from_cre_csv\r\n new_cre, exists = cre_main.register_cre(cre, database)\r\n File \"/home/csi/OpenCRE/application/cmd/cre_main.py\", line 122, in register_cre\r\n collection.add_internal_link(\r\n File \"/home/csi/OpenCRE/application/database/db.py\", line 1597, in add_internal_link\r\n cycle = self.__introduces_cycle(f\"CRE: {higher.id}\", f\"CRE: {lower.id}\")\r\n File \"/home/csi/OpenCRE/application/database/db.py\", line 766, in __introduces_cycle\r\n raise ValueError(\r\nValueError: Existing graph contains cycle,this not a recoverable error, manual database actions are required [('CRE: 155-155', 'CRE:\r\n 546-564'), ('CRE: 546-564', 'CRE: 155-155')]\r\nINFO:werkzeug:161.218.188.108 - - [06/Sep/2024 10:08:39] \"POST /rest/v1/cre_csv_import HTTP/1.1\" 500 -\r\n```\r\n\r\n### Steps to reproduce\r\n\r\nFirst clean the database and populate from upstream:\r\n```\r\n~/OpenCRE$ rm -rf standards_cache.sqlite; make migrate-upgrade; python cre.py --upstream_sync\r\n```\r\n\r\nSecond have a CSV file in the correct format to import (name the file cra_cre.csv with the following content)\r\n```\r\nCRE 0,CRE 1,CRE 2,CRE 3,CRE 4,Cyber Resiliency Act|name,Cyber Resiliency Act|id,Cyber Resiliency Act|hyperlink,NIST 800-53 v5|name,NIST 800-53 v5|id,NIST 800-53 v5|hyperlink,ISO/IEC 27001:2013|name,ISO/IEC 27001:2013|id,ISO/IEC 27001:2013|hyperlink,ASVS|name,ASVS|id,ASVS|hyperlink,CIS v8|name,CIS v8|id,CIS v8|hyperlink,PCI DSS|name,PCI DSS|id,PCI DSS|hyperlink,CSA CCM|name,CSA CCM|id,CSA CCM|hyperlink\r\n,,347-352|Set and confirm integrity of security deployment configuration,,,Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,028-254|Secure auto-updates over full stack,,,Address and remediate vulnerabilities,2.2,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,820-878|Document all trust boundaries and significant data flows,,,Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,,007-274|Patching and updating system components,,Address and remediate vulnerabilities,2.2,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,,286-500|OS security,,Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,732-148|Vulnerability management,,,Address and remediate vulnerabilities,2.2,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,,002-202|Address and remediate,,Address and remediate vulnerabilities,2.2,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,Flaw Remediation,SI-2,https://csrc.nist.gov/publications/detail/sp/800-53/rev-5/final,Management of 
technical vulnerabilities,A.12.6.1,https://www.iso.org/standard/54534.html,Security Update Verification,V14.4,https://owasp.org/www-project-application-security-verification-standard/,Continuous Vulnerability Management,Control 7,https://www.cisecurity.org/controls/v8/,Ensure that all system components and software are protected from known vulnerabilities by installing applicable vendor-supplied security patches.,6.2,https://www.pcisecuritystandards.org/documents/PCI_DSS_v3-2-1.pdf,Vulnerability Remediation,IVS-06,https://cloudsecurityalliance.org/research/cloud-controls-matrix/\r\n,,,,\"841-757|Use approved cryptographic algorithms in generation, seeding and verification of OTPs\",Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,,240-274|Log only non-sensitive data,,Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,,261-010|Program management for secure software development,,Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,766-162|Security Analysis and documentation,,,,Address and remediate vulnerabilities,2.2,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,,,731-120|Document requirements for (data) protection levels,Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,,,227-045|Identify sensitive data and subject it to a policy,Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n,,,571-640|Personal data handling management,,Designed; developed; and produced to ensure cybersecurity,1.1,https://www.european-cyber-resilience-act.com/Cyber_Resilience_Act_Annex_1.html,,,,,,,,,,,,,,,,,,\r\n```\r\n\r\n\r\nThird run the command to start the import:\r\n```\r\n~$ curl -X POST http://CRE-LOCAL-SERVER:5000/rest/v1/cre_csv_import -F \"cre_csv=@cra_cre.csv\"\r\n```\r\n\r\n", + "summary": "The issue reported involves an error that occurs when attempting to import a mapping into a database that has been populated from an upstream source. While the import works correctly on an empty database, it fails with a \"500 Internal Server Error\" when the database contains data. The underlying problem seems to be related to a cycle in the existing graph structure, which prevents the import from completing successfully. \n\nTo resolve this, manual database actions are required to fix the cycle issue identified in the error message. 
It may involve reviewing the relationships between nodes to eliminate any cycles before attempting another import.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OpenCRE/issues/552", @@ -6516,10 +6744,11 @@ "pk": 229, "fields": { "nest_created_at": "2024-09-11T19:32:01.930Z", - "nest_updated_at": "2024-09-11T19:32:01.930Z", + "nest_updated_at": "2024-09-13T15:09:18.379Z", "node_id": "I_kwDOF9sgec4852Zl", "title": "images", "body": "![logo](https://user-images.githubusercontent.com/89657173/136672956-dc3689fb-106d-4803-9768-32130ab2f33f.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-gujarat-technological-university/issues/2", @@ -6546,6 +6775,7 @@ "node_id": "I_kwDOF8qHOc5f-TgD", "title": "Add descriptions for business users", "body": "## Context\r\n\r\nThe language of OWASP documents speaks to security, operations, IT and engineering professionals. The introduction of low-code/no-code has lowered the bar to become a digital builder. This is the root of why this technology is so important. However, it also means that we cannot assume the same level of technical or security accoman from all low-code/no-code developers.\r\n\r\nTo enable usage of the OWASP Low-Code/No-Code Top 10 by everyone, no matter their technical background, we would need to remove our assumptions and describe risks in a common language that everyone can understand. This should not replace the technical or security oriented language which is a golden standard of an OWASP project, but come as a parallel view on the same issues.\r\n\r\n## Proposal Description\r\n\r\nThe current template for a risk category is as follows:\r\n\r\n- Risk Rating\r\n- The Gist\r\n- Description\r\n- Example Attack Scenarios\r\n - Scenario 1\r\n - Scenario 2\r\n - ...\r\n- How to Prevent\r\n- References\r\n\r\nWe propose the following additions:\r\n- A new section between _Risk Rating_ and _The Gist_ describing the category in simple terms\r\n- A new subsection for each _Scenario_ describing it in simple terms", + "summary": "The issue highlights the need for clearer descriptions in OWASP documentation to accommodate business users and those with varying technical expertise, particularly in the context of low-code/no-code development. It suggests that the existing risk category template be enhanced by adding a new section that explains each risk category in simpler terms, as well as a simplified explanation for each attack scenario. This approach aims to make the information more accessible while maintaining the technical depth for expert users. \n\nTo implement this, it would be necessary to revise the template accordingly, ensuring that the new sections are clear and straightforward.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-low-code-no-code-security-risks/issues/48", @@ -6576,6 +6806,7 @@ "node_id": "I_kwDOF8qHOc5f-U7M", "title": "Add product-specific examples", "body": "## Context\r\n\r\nLow-Code/No-Code [can mean many different things](https://www.gartner.com/en/newsroom/press-releases/2022-12-13-gartner-forecasts-worldwide-low-code-development-technologies-market-to-grow-20-percent-in-2023). Tools can differ in technology, users, developers, use cases and more. For example, Low-Code Application Platforms (LCAP) is used to build web and mobile applications while Robotic Process Automation (RPA) is used to build bots. 
These technologies are ever-changing and are in the process of merging with eachother, so its still important to cover them in a single project. However, we should also emphasize where they differ, allowing people to focus on the risks relevant to a particular technology.\r\n\r\n## Proposal Description\r\n\r\nThe current template for a risk category is as follows:\r\n\r\n- Risk Rating\r\n- The Gist\r\n- Description\r\n- Example Attack Scenarios\r\n - Scenario 1\r\n - Scenario 2\r\n - ...\r\n- How to Prevent\r\n- References\r\n\r\nWe propose the following additions:\r\n- A new subsection under _Risk Rating_, where we provide different ratings for each Low-Code development technologies\r\n- Label each _Scenario_ with the relevant technologies. A scenario can apply to multiple technologies.\r\n\r\nLow-Code development technologies to distinguish:\r\n- Low-Code Application Platforms (LCAP)\r\n- Robotic Process Automation (RPA)\r\n- Integration Platform as a Service (iPaaS)", + "summary": "The issue discusses the need to enhance the current risk assessment template for Low-Code/No-Code technologies by incorporating product-specific examples. It highlights the diversity in Low-Code tools and the importance of distinguishing between different technologies, such as Low-Code Application Platforms (LCAP), Robotic Process Automation (RPA), and Integration Platform as a Service (iPaaS). \n\nThe proposal suggests adding a subsection under the Risk Rating to provide specific ratings for each technology and to label attack scenarios with the relevant technologies, allowing for a clearer understanding of risks associated with each. \n\nTo address this, the template should be updated to include these new subsections and categorizations for improved clarity and relevance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-low-code-no-code-security-risks/issues/49", @@ -6604,6 +6835,7 @@ "node_id": "I_kwDOF8qHOc5qYGEn", "title": "LCNC-SEC-04 - Scenario 2 (Example Attack Scenarios)", "body": "Consider (re)moving Scenario 2\r\n\r\n I know this is unrelated and is not part of the change, but scenario 2 doesn't really fit here, right?\r\n\r\n_Originally posted by @mbrg in https://github.com/OWASP/www-project-top-10-low-code-no-code-security-risks/pull/56#discussion_r1182454750_\r\n ", + "summary": "The issue raises a concern about the relevance of Scenario 2 in the context of the project. It suggests that Scenario 2 may not be appropriate for inclusion in the current documentation. A review of the content and context of Scenario 2 is needed to determine whether it should be removed or revised to better align with the project's objectives.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-low-code-no-code-security-risks/issues/67", @@ -6630,6 +6862,7 @@ "node_id": "I_kwDOF8qHOc5qYGWm", "title": "LCNC-SEC-03 - Clarify \"unintended consequences\"", "body": "\"Unintended consequences\" may be too nebulous, it's coverage is broad and vague. Clarity in the description and impact for what \"unintended consequences\" means in relation to data leakage is required, or consider a different title.\r\n\r\n@mbrg ", + "summary": "The issue raises concerns about the term \"unintended consequences,\" suggesting it is too vague and broad in its current usage. It calls for a clearer definition and description of its implications, specifically regarding data leakage. Alternatively, it proposes considering a different title to enhance clarity. 
To address this, a more precise formulation or rephrasing of the term is needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-low-code-no-code-security-risks/issues/68", @@ -6652,10 +6885,11 @@ "pk": 234, "fields": { "nest_created_at": "2024-09-11T19:32:31.845Z", - "nest_updated_at": "2024-09-11T19:32:31.845Z", + "nest_updated_at": "2024-09-13T03:46:09.791Z", "node_id": "I_kwDOF8qHOc5qYHG1", "title": "LCNC-SEC-10 - Consider moving attack scenario to LCNC-SEC-08", "body": " I know that this was part of current available version, but should we move this risk of having too verbose logs to LCNC-SEC-08?\r\n\r\n_Originally posted by @mbrg in https://github.com/OWASP/www-project-top-10-low-code-no-code-security-risks/pull/56#discussion_r1182467102_\r\n ", + "summary": "The issue discusses the possibility of relocating an attack scenario related to verbose logs from its current position to a different section, LCNC-SEC-08. While it acknowledges that the scenario is part of the current available version, it raises the question of whether this change would be beneficial. To address this, a review of both sections and their relevance to the scenario should be conducted to determine if the move is warranted.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-10-low-code-no-code-security-risks/issues/69", @@ -6678,10 +6912,11 @@ "pk": 235, "fields": { "nest_created_at": "2024-09-11T19:35:23.243Z", - "nest_updated_at": "2024-09-11T19:35:23.243Z", + "nest_updated_at": "2024-09-13T15:11:54.036Z", "node_id": "I_kwDOFOM8vs56Di1G", "title": "Hold MTG #22: 2024/10", "body": "- [x] fix the date\n- [ ] announce the meeting on the page\n- [ ] make connpass event\n- [ ] spread the word\n- [ ] hold the meeting\n- [ ] archive the announcement\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-saitama/issues/79", @@ -6708,6 +6943,7 @@ "node_id": "I_kwDOFOM8vs56Di2H", "title": "Hold MTG #23: 2024/12", "body": "- [ ] fix the date\n- [ ] announce the meeting on the page\n- [ ] make connpass event\n- [ ] spread the word\n- [ ] hold the meeting\n- [ ] archive the announcement\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-saitama/issues/80", @@ -6730,10 +6966,11 @@ "pk": 237, "fields": { "nest_created_at": "2024-09-11T19:35:34.620Z", - "nest_updated_at": "2024-09-11T19:35:34.620Z", + "nest_updated_at": "2024-09-13T15:12:02.613Z", "node_id": "I_kwDOFLkrvc55lLR7", "title": "2 typos \"prvilege requried \" -> \"privilege required \"", "body": "In line \"Note: Let’s call the Critical/High vulnerabilities with no prvilege requried and minimal user interaction as ‘OneClick’.\"\r\n\r\nhttps://github.com/OWASP/www-project-desktop-app-security-top-10/blob/435a699e590c8440e5592d1bfe32c682fd9b68d0/tab_severitybasedranking.md?plain=1#L16", + "summary": "The issue identifies two typographical errors in the text where \"prvilege requried\" should be corrected to \"privilege required.\" The line in question refers to Critical/High vulnerabilities that require no privilege and minimal user interaction, labeled as ‘OneClick’. 
The suggested action is to update the text in the specified line to reflect the correct spelling.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-desktop-app-security-top-10/issues/4", @@ -6760,6 +6997,7 @@ "node_id": "I_kwDOFAM8Ps5LY-F4", "title": "Vehicleid for existing users not showing up", "body": "When accessing the forum, a GET request to /community/api/v2/community/posts/recent is done to receive information about posts and comments. There are 3 existing posts made by the 3 default users, however there is no information about their vehicles.\r\n\r\n![image](https://user-images.githubusercontent.com/42330295/172621717-a02f05d4-407a-4c70-84c9-db8ec9faeccc.png)\r\n\r\nWhich is honestly not a big deal, you can simply create two users, post something and use their vehicle guids for the challenges. However, I think it would be nice to already have some pre-built vehicleids so the challenge progress feels more natural. I think the vehicles are already created because there is mechanic reports for the default users, so I'm not sure if this is the expected behaviour.\r\n\r\nBoth last stable and development versions have this behaviour. Environment: Macbook M1, macOS Monterey 12.4, Docker 4.7.1 (77678).\r\n", + "summary": "Fix the issue of vehicle IDs not displaying for existing users in the forum's recent posts API. Ensure that when a GET request is made to `/community/api/v2/community/posts/recent`, the response includes vehicle information for the three default users who have made posts.\n\n1. Investigate the API endpoint to confirm if vehicle IDs are being fetched correctly from the database.\n2. Check the database to ensure that vehicle records exist for the default users, and verify that the mechanic reports are correctly linked to these users.\n3. Review the data serialization process in the API to ensure that vehicle information is included in the response.\n4. Test the changes in both stable and development versions to confirm the fix.\n\nConsider adding pre-built vehicle IDs to enhance user experience in challenges.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/75", @@ -6789,6 +7027,7 @@ "node_id": "I_kwDOFAM8Ps5Mpxef", "title": "Community Author Information is a \"Snapshot\" rather than live", "body": "**Is your feature request related to a problem? Please describe.**\r\nIf you make a post in the community, your current avatar, email address, nickname, etc are all saved in Mongo at that moment. This causes a few issues:\r\n\r\n* If the author modifies any of their information, this isn't reflect in the community\r\n* Saving the image data in mongo creates overhead + the UI is less responsive when returning base64encoded image data.\r\n\r\n**Describe the solution you'd like**\r\nChange author information to be return dynamically. Additionally, don't save any image information in mongo or return image data as base64encoded info. Instead add a new endpoint that dynamically returns the correct user's avatar. Less overhead, less storage, plus caching rules can be set up for the images that the browser can handle.", + "summary": "Implement dynamic retrieval of community author information instead of storing it as a snapshot in MongoDB. \n\n1. Identify and modify the existing schema where author data (avatar, email, nickname) is currently saved.\n2. Create a new API endpoint that fetches the current author details dynamically from the user profile database, ensuring real-time accuracy.\n3. 
Remove the logic that saves image data in MongoDB. Instead, serve images directly from the user profile storage.\n4. Establish caching rules for image responses to enhance UI responsiveness and reduce server load.\n5. Test the new endpoint to ensure it correctly reflects any changes made to the author’s information in real-time. \n\nBegin by reviewing the current data retrieval methods in the community post functionality and plan the necessary changes to the backend architecture.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/80", @@ -6818,6 +7057,7 @@ "node_id": "I_kwDOFAM8Ps5PmTnc", "title": "Good to provide solution doc for all challenges", "body": "**Is your feature request related to a problem? Please describe.**\r\nPeople can explore the challenges but always good to provide final solutions as well.\r\n\r\n**Describe the solution you'd like**\r\nSome doc or youtube videos will definitely help for novice member like me\r\n\r\n**Describe alternatives if any you've considered**\r\nDoc or Video reference\r\n\r\n**Additional context**\r\nAdd any other context/screenshots/rough sketchs about the feature request here.", + "summary": "Create a comprehensive solution documentation for all challenges. Include both written guides and video tutorials to aid novice members in understanding the solutions.\n\n1. **Identify Challenges**: Compile a list of existing challenges that require documentation.\n2. **Document Solutions**: For each challenge, write clear, step-by-step solutions. Use code snippets where necessary to illustrate key points.\n3. **Create Video Content**: Record video tutorials that walk through the solutions, highlighting important concepts and providing visual aids.\n4. **Organize Content**: Structure the documentation in an easily navigable format, categorizing challenges by difficulty or topic.\n5. **Solicit Feedback**: After initial documentation is created, gather feedback from users to improve clarity and usefulness.\n\nBy following these steps, enhance the learning experience for all users engaging with the challenges.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/95", @@ -6847,6 +7087,7 @@ "node_id": "I_kwDOFAM8Ps5QJsda", "title": "Does crAPI's configuration allow it to be used as a Capture-the-flag (CTF) event?", "body": "**Is your feature request related to a problem? Please describe.**\r\nCurious to know if crAPI's configuration allows it to be used as Capture-the-flag (CTF) event\r\n\r\n**Describe the solution you'd like**\r\nA way to configure crAPI to return (e.g in the HTTP response body) a user configured custom flag when particular challenges are solved\r\n\r\n**Describe alternatives if any you've considered**\r\nNone at the moment..\r\n\r\n**Additional context**\r\nAdd any other context/screenshots/rough sketchs about the feature request here.", + "summary": "Evaluate the feasibility of using crAPI for Capture-the-Flag (CTF) events. Assess the current configuration capabilities of crAPI to determine if it can be adapted to return user-defined flags in HTTP responses upon solving specific challenges.\n\n1. Review the current crAPI configuration settings and documentation to identify how responses are generated.\n2. Determine if the API can be modified or extended to include custom payloads in response bodies.\n3. Implement a prototype that allows for configurable flags to be set and returned based on challenge completion.\n4. 
Test the prototype with various challenge scenarios to ensure flags are triggered correctly.\n\nAdditionally, consider potential security implications and ensure that the implementation does not expose sensitive data or vulnerabilities.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/97", @@ -6876,6 +7117,7 @@ "node_id": "I_kwDOFAM8Ps5TNXg8", "title": "Add GRPC service in crAPI ", "body": "**Is your feature request related to a problem? Please describe.**\r\nCurrently, crAPI only supports REST endpoints. We can only identify and demo REST vulnerabilities with crAPI. We should add a GRPC service which will make it easier to demo GRPC vulnerabilities. \r\n\r\n**Describe the solution you'd like**\r\n@piyushroshan, can you please add more context on a plausible solution here?", + "summary": "Implement a gRPC service in crAPI to enhance vulnerability demonstration capabilities. \n\n1. Analyze the current architecture of crAPI to understand how REST endpoints are integrated.\n2. Research gRPC framework and its integration with existing services.\n3. Create a new gRPC service definition using Protocol Buffers (.proto files) that mirrors the REST endpoints.\n4. Develop the gRPC server-side implementation, ensuring it adheres to the same logic as the REST endpoints.\n5. Implement client-side functionality to call the gRPC service, enabling users to demo vulnerabilities.\n6. Test the new gRPC service to ensure functionality and compatibility with existing tools.\n7. Update documentation to include instructions for using the new gRPC service.\n\nEngage with @piyushroshan for additional insights on the solution design and integration specifics.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/110", @@ -6907,6 +7149,7 @@ "node_id": "I_kwDOFAM8Ps5TNYvH", "title": "Add GraphQL service in crAPI", "body": "**Is your feature request related to a problem? Please describe.**\r\nCurrently, crAPI only supports REST endpoints. We can only identify and demo REST vulnerabilities with crAPI. We should add a GraphQL service which will make it easier to demo GraphQL vulnerabilities.\r\n\r\n**Describe the solution you'd like**\r\nA GraphQL API using which we can demonstrate Authorization, Introspection, Batching, Injection, CSRF and other GraphQL attacks. \r\n", + "summary": "Implement GraphQL service in crAPI to extend its capabilities beyond REST endpoints. \n\n1. **Identify Requirements**: Determine the specific GraphQL vulnerabilities to demonstrate, including Authorization, Introspection, Batching, Injection, and CSRF.\n\n2. **Set Up GraphQL Schema**: Create a GraphQL schema that defines types, queries, and mutations relevant to the identified vulnerabilities.\n\n3. **Develop GraphQL Resolvers**: Implement resolvers for the defined queries and mutations, ensuring they are designed to simulate vulnerabilities effectively.\n\n4. **Integrate with crAPI**: Ensure the new GraphQL service is integrated into the existing crAPI framework, allowing users to seamlessly switch between REST and GraphQL endpoints.\n\n5. **Create Documentation**: Document the usage of the new GraphQL service, including examples of how to trigger various vulnerabilities.\n\n6. 
**Testing**: Conduct thorough testing to ensure the GraphQL service operates as intended and effectively demonstrates the targeted vulnerabilities.\n\nBy following these steps, enhance crAPI's functionality to include a robust GraphQL service for vulnerability demonstration.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/111", @@ -6939,6 +7182,7 @@ "node_id": "I_kwDOFAM8Ps5TNbJk", "title": "Document all the vulnerabilities which currently exist in crAPI", "body": "**Is your feature request related to a problem? Please describe.**\r\nA documentation page around all the vulnerabilities that currently exist in crAPI will be very helpful. This will provide an easy way to evaluate if crAPI is right for our use cases. \r\n\r\n**Describe the solution you'd like**\r\nA table in README or a separate markdown file that lists down all the vulnerabilities crAPI has and maps them to OWASP API Top 10, OWASP Top 10 and CWEs. \r\n\r\n**Additional context**\r\nNot needed", + "summary": "Document all existing vulnerabilities in crAPI. Create a comprehensive documentation page that lists vulnerabilities and maps them to relevant standards such as OWASP API Top 10, OWASP Top 10, and Common Weakness Enumerations (CWEs).\n\n**First Steps:**\n1. Conduct a thorough security assessment of crAPI to identify all vulnerabilities.\n2. Categorize each vulnerability according to OWASP API Top 10 and CWEs.\n3. Draft a table format for the README or create a separate markdown file to present the findings clearly.\n4. Ensure that the documentation is easy to navigate and understand for potential users evaluating crAPI.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/112", @@ -6953,8 +7197,8 @@ "author": 84, "repository": 406, "assignees": [ - 87, - 88 + 88, + 87 ], "labels": [ 58, @@ -6973,6 +7217,7 @@ "node_id": "I_kwDOFAM8Ps5TNckV", "title": "Up-to-date postman collection as per all the supported vulnerabilities", "body": "**Is your feature request related to a problem? Please describe.**\r\nThe current crAPI postman collection is 5 months old and there have been some enhancements afterward. Some of the changes might be in API specs and will need updates in API specs as well as postman collections. \r\n\r\n**Describe the solution you'd like**\r\nCan we have a GitHub workflow which runs on every PR or every week, checks for a diff of postman collection, and updates the postman collection if necessary? ", + "summary": "Update the crAPI Postman collection to reflect all supported vulnerabilities as per recent enhancements. The existing collection is outdated by five months and requires synchronization with the latest API specifications.\n\nImplement a GitHub workflow that triggers on every pull request or on a weekly basis. This workflow should:\n\n1. Compare the current Postman collection with the latest version.\n2. Identify any differences or updates needed in the API specifications.\n3. 
Automatically update the Postman collection if discrepancies are found.\n\nFirst steps to tackle this problem:\n\n- Review the latest API specifications to identify enhancements made over the last five months.\n- Create a GitHub Actions workflow that runs a script to check for differences in the Postman collection.\n- Use a diff tool to compare the existing collection against the updated version.\n- Develop a mechanism to automatically update the Postman collection based on the results of the comparison, ensuring that any changes are properly committed back to the repository.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/113", @@ -7004,6 +7249,7 @@ "node_id": "I_kwDOFAM8Ps5TNlp7", "title": "Monitoring for crAPI", "body": "**Is your feature request related to a problem? Please describe.**\r\nProvide an easy way to set up crAPI along with monitoring setup. For example, with Kubernetes setup, I should be able to monitor golden signals for my Kubernetes setup with Kube-state metrics. \r\n\r\n**Describe the solution you'd like**\r\nWay to deploy an application with Prometheus and Grafana. The user should be able to create a Grafana dashboard out of the infra metrics from crAPI. We can also provide a dashboard template. \r\n", + "summary": "Implement a monitoring solution for crAPI to facilitate easy deployment and monitoring of Kubernetes setups. \n\n1. **Set Up Deployment**: Create a guide for deploying crAPI alongside Prometheus and Grafana. Include Kubernetes configuration examples for deploying these tools in a cluster.\n\n2. **Integrate Kube-State Metrics**: Ensure that crAPI can collect kube-state metrics for monitoring golden signals. Configure Prometheus to scrape these metrics from the Kubernetes environment.\n\n3. **Develop Grafana Dashboard Templates**: Provide pre-configured Grafana dashboard templates that visualize the infrastructure metrics collected by crAPI. \n\n4. **Document Monitoring Setup**: Create detailed documentation on how to set up monitoring, including how to deploy crAPI with Prometheus and Grafana, and how to use the dashboard templates effectively.\n\n5. **Test the Setup**: Validate the entire monitoring setup by deploying a test application and ensuring that metrics are collected and displayed correctly on the Grafana dashboards.\n\nStart by creating a detailed deployment guide that outlines the necessary configurations and steps for users to follow.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/118", @@ -7033,6 +7279,7 @@ "node_id": "I_kwDOFAM8Ps5TOJMB", "title": "Document Happy Path for crAPI", "body": "**Is your feature request related to a problem? Please describe.**\r\nCurrently, when we start crAPI, we don't know where to click or where to look to generate specific vulnerabilities. We should document a \"happy path\" or principal workflow which will help people to actually understand the working of the crAPI application. \r\n\r\n**Describe the solution you'd like**\r\nA document with \"happy path\" and link it in main README. \r\n\r\n**Describe alternatives if any you've considered**\r\nNA\r\n\r\n**Additional context**\r\nNA", + "summary": "Document the \"happy path\" for crAPI to improve user experience. Create a detailed guide outlining the primary workflow for generating specific vulnerabilities within the application. \n\n1. Identify key features of crAPI that users need to navigate.\n2. Outline steps to access these features, including any prerequisites for setup.\n3. 
Include screenshots or diagrams to visually guide users through the process.\n4. Ensure the document is clear and concise, emphasizing crucial actions and decisions.\n5. Link the completed document in the main README for easy access.\n\nStart by gathering information from existing users about their experiences and challenges. Use this feedback to structure the document effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/120", @@ -7065,6 +7312,7 @@ "node_id": "I_kwDOFAM8Ps5TOKJS", "title": "Add direct command Injection vulnerability (CWE-77, OWASP API 8)", "body": "**Is your feature request related to a problem? Please describe.**\r\nCurrently, we can use crAPI to demonstrate indirect command injection but we also want to add capabilities to demonstrate direct command injection. \r\n\r\n**Describe the solution you'd like**\r\n@piyushroshan , can you guide us here for a solution?\r\n\r\n", + "summary": "Implement direct command injection vulnerability (CWE-77, OWASP API 8) in crAPI. This enhancement should allow users to demonstrate both indirect and direct command injection attacks.\n\n**First Steps:**\n1. Research direct command injection techniques and identify common payloads.\n2. Create a new endpoint in crAPI specifically designed to showcase direct command injection.\n3. Ensure the endpoint accepts user input that can be executed as a command in the underlying system.\n4. Implement appropriate safeguards in testing environments to prevent any unauthorized access or system damage.\n5. Document the new feature, detailing the payloads used and their expected outcomes for educational purposes.\n6. Collaborate with @piyushroshan for guidance on best practices and integration into the existing framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/121", @@ -7097,6 +7345,7 @@ "node_id": "I_kwDOFAM8Ps5TOM1k", "title": "Add Security Misconfiguration vulnerabilities in crAPI", "body": "**Is your feature request related to a problem? Please describe.**\r\nWe should be able to demo security misconfiguration vulnerabilities with crAPI. Security misconfiguration falls under [API7:2019 Security Misconfiguration](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xa7-security-misconfiguration.md). \r\n\r\n\r\n\r\n**Describe the solution you'd like**\r\nThere are CWEs which fall under security misconfigs:\r\n\r\n- [CWE-2: Environmental Security Flaws](https://cwe.mitre.org/data/definitions/2.html)\r\n- [CWE-16: Configuration](https://cwe.mitre.org/data/definitions/16.html)\r\n- [CWE-388: Error Handling](https://cwe.mitre.org/data/definitions/388.html)\r\n\r\nWe want to add capability in crAPI to be able to demo all three of them. \r\n\r\n**Describe alternatives if any you've considered**\r\nNA\r\n\r\n**Additional context**\r\nNA", + "summary": "Add Security Misconfiguration Vulnerabilities to crAPI.\n\n1. Implement demo capabilities for security misconfiguration vulnerabilities, specifically targeting API7:2019 Security Misconfiguration.\n2. Focus on incorporating the following Common Weakness Enumerations (CWEs):\n - CWE-2: Environmental Security Flaws\n - CWE-16: Configuration\n - CWE-388: Error Handling\n3. 
Create test cases and scenarios that effectively demonstrate each of the identified CWEs within the crAPI framework.\n\n**First Steps:**\n- Research and gather detailed information on the security misconfigurations represented by the specified CWEs.\n- Design and outline the architecture for integrating these vulnerabilities into crAPI.\n- Develop initial proof-of-concept implementations for each CWE to validate the approach.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/122", @@ -7126,6 +7375,7 @@ "node_id": "I_kwDOFAM8Ps5TOPPy", "title": "Add a way to demonstrate insufficient logging and monitoring vulnerabilities in crAPI", "body": "**Is your feature request related to a problem? Please describe.**\r\ncrAPI doesn't have the capability to demo insufficient logging and monitoring vulnerability which is [OWASP API 10:2019](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xaa-insufficient-logging-monitoring.md). \r\n\r\n**Describe the solution you'd like**\r\n[API10:2019 Insufficient Logging & Monitoring](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xaa-insufficient-logging-monitoring.md) includes two CWEs:\r\n\r\n- [CWE-223: Omission of Security-relevant Information](https://cwe.mitre.org/data/definitions/223.html)\r\n- [CWE-778: Insufficient Logging](https://cwe.mitre.org/data/definitions/778.html)\r\n\r\nWe want to add the capability to demonstrate both of them. \r\n\r\n**Describe alternatives if any you've considered**\r\nNA\r\n\r\n**Additional context**\r\nNA", + "summary": "Implement a demonstration of insufficient logging and monitoring vulnerabilities in crAPI. Address the absence of capability to showcase vulnerabilities related to [OWASP API 10:2019](https://github.com/OWASP/API-Security/blob/master/2019/en/src/0xaa-insufficient-logging-monitoring.md).\n\n1. Analyze the OWASP documentation for Insufficient Logging and Monitoring, focusing on the two relevant CWEs: \n - [CWE-223: Omission of Security-relevant Information](https://cwe.mitre.org/data/definitions/223.html)\n - [CWE-778: Insufficient Logging](https://cwe.mitre.org/data/definitions/778.html)\n\n2. Design an architecture that allows the simulation of these vulnerabilities within the crAPI environment.\n\n3. Develop logging features that intentionally omit critical security information to represent CWE-223.\n\n4. Create scenarios where logging is minimal or absent entirely to demonstrate CWE-778.\n\n5. Test the implementation to ensure it effectively showcases the vulnerabilities as intended.\n\n6. Document the process and findings to facilitate understanding and education on the vulnerabilities.\n\n7. Consider integrating automated tests to verify the presence of these vulnerabilities in future builds.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/123", @@ -7155,6 +7405,7 @@ "node_id": "I_kwDOFAM8Ps5Tfute", "title": "Easy mode and Challenge mode for using crAPI", "body": "**Is your feature request related to a problem? Please describe.**\r\nThere should be 2 ways to use crAPI. \r\n- Easy mode would have an easily accessible swagger file. \r\n- Challenge mode would not.\r\n\r\n**Describe the solution you'd like**\r\nAs above.", + "summary": "Implement two usage modes for crAPI: Easy mode and Challenge mode. \n\n1. **Easy Mode**: Create an easily accessible Swagger file that provides a user-friendly interface for API exploration. 
Ensure that the documentation is clear and includes examples for common use cases.\n\n2. **Challenge Mode**: Remove the Swagger file and require users to interact with the API through direct HTTP requests, encouraging a deeper understanding of the API's functionality.\n\n**First Steps**:\n- Review the current crAPI documentation and identify the modifications needed to create the Swagger file for Easy mode.\n- Set up a separate branch for the Challenge mode implementation.\n- Evaluate the current API endpoints to ensure they are well-documented and can be effectively used without Swagger.\n- Gather user feedback on preferred features for both modes to guide development.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/127", @@ -7184,6 +7435,7 @@ "node_id": "I_kwDOFAM8Ps5TfywR", "title": "Video upload functionality is not eminent", "body": "**Is your feature request related to a problem? Please describe.**\r\nCurrently, an upload video functionality is not eminent on the edit profile screen. Here's how it looks like:\r\n\"Screenshot\r\n\r\n**Describe the solution you'd like**\r\nWe can add a rectangular upload box to make it prominent. \r\n\r\n**Additional context**\r\n\"Screenshot\r\n", + "summary": "Implement a prominent video upload functionality on the edit profile screen. \n\n1. Add a rectangular upload box to enhance visibility.\n2. Ensure the upload box is clearly labeled to guide users.\n3. Use appropriate styling to differentiate the upload box from other elements on the screen.\n\n**First Steps:**\n- Review the current layout of the edit profile screen.\n- Design a mockup of the upload box with dimensions and positioning.\n- Update the front-end code to integrate the new upload box, using HTML and CSS.\n- Implement JavaScript functionality to handle video uploads, including file type validation and size limits.\n- Test the feature across different devices and browsers for compatibility.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/129", @@ -7215,6 +7467,7 @@ "node_id": "I_kwDOFAM8Ps5ccaC_", "title": "Docker cannot run services in this part of the web, but other services are available", "body": "![image](https://user-images.githubusercontent.com/111032533/213723357-9b64471f-1aa6-4218-a2bd-a711523dbaf0.png)\r\n", + "summary": "Investigate the issue where Docker cannot run services in the specified area of the web, despite other services functioning correctly. \n\n1. Check Docker's network configuration to ensure it is not restricted from accessing the required web services.\n2. Verify the firewall settings on the host machine to rule out any blocking of Docker's outbound connections.\n3. Examine the Docker logs for any error messages or warnings that may indicate the root cause of the problem.\n4. Test the connectivity from within a Docker container using tools like `curl` or `ping` to diagnose network issues.\n5. 
Review Docker's version and configuration to ensure compatibility with the services being accessed.\n\nBegin troubleshooting by running a simple container and attempting to reach the problematic services to gather more information.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/167", @@ -7241,6 +7494,7 @@ "node_id": "I_kwDOFAM8Ps5eUVoL", "title": "ERROR: Encountered errors while bringing up the project.", "body": "![image](https://user-images.githubusercontent.com/61447785/218465456-740dee69-8e14-430b-ae8a-5496e16e4376.png)\r\n", + "summary": "Investigate and resolve the project startup errors indicated in the GitHub issue. Review the error message from the screenshot for specific details on what is failing during the project launch. \n\nFirst steps:\n1. Check the project dependencies and ensure they are correctly defined in the configuration files (e.g., `package.json`, `requirements.txt`).\n2. Verify that all required services (e.g., databases, caches) are running and accessible.\n3. Review the Docker configuration files (e.g., `Dockerfile`, `docker-compose.yml`) for any misconfigurations that might prevent the services from starting.\n4. Run the project in a local development environment to reproduce the error and gather more information.\n5. Check version compatibility of all software components involved (e.g., libraries, frameworks) and update if necessary. \n6. Look for any recent changes in the project that might have introduced this issue, and consider rolling back to a previous stable version.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/171", @@ -7269,6 +7523,7 @@ "node_id": "I_kwDOFAM8Ps5ekgdL", "title": "dependency failed to start: container crapi-workshop exited", "body": "**Describe the bug**\r\nAfter running \"docker compose -f docker-compose.yml --compatibility up -d\" I receive the error: dependency failed to start: container crapi-workshop exited (0)\r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior.\r\n1. Use the commands listed on github:\r\n (curl -o docker-compose.yml https://raw.githubusercontent.com/OWASP/crAPI/main/deploy/docker/docker- compose.yml\r\n docker compose pull\r\n docker compose -f docker-compose.yml --compatibility up -d) with Network of the VM set to NAT. Behaviour of crapi: all containers start and no errors are received.\r\n2. Shutdown the VM, change Network to internal network, and start VM. Use the last command and you'll receive the error.\r\n\r\n**Expected behavior**\r\nAll containers should start.\r\n\r\n**Runtime Environment**\r\nSytem/Environemnt information (e.g Output of docker -v and uname -a)\r\n-Debian 11 running in VirtualBox 7\r\n-Network of VM changed from the default NAT to Internal Network\r\n-Docker Engine - Community Version: 23.0.1\r\n-Docker Compose version v2.16.0\r\n[crapi-workshop log.txt](https://github.com/OWASP/crAPI/files/10748201/crapi-workshop.log.txt)\r\n", + "summary": "Investigate the \"dependency failed to start: container crapi-workshop exited\" error when running the Docker Compose setup. \n\n**Steps to Reproduce:**\n1. Execute the following commands to set up the environment:\n - Download the Docker Compose file:\n ```bash\n curl -o docker-compose.yml https://raw.githubusercontent.com/OWASP/crAPI/main/deploy/docker/docker-compose.yml\n ```\n - Pull the necessary images:\n ```bash\n docker compose pull\n ```\n - Start the containers:\n ```bash\n docker compose -f docker-compose.yml --compatibility up -d\n ```\n2. 
Initially, run this in a Virtual Machine (VM) with the network set to NAT; all containers should start without errors.\n3. Shut down the VM, change the network setting to Internal Network, and restart the VM. Re-run the last command to see the error.\n\n**Expected Outcome:**\nAll containers should start successfully regardless of the network configuration.\n\n**Environment Details:**\n- Debian 11 in VirtualBox 7\n- Docker Engine - Community Version: 23.0.1\n- Docker Compose version: v2.16.0\n\n**First Steps to Tackle the Problem:**\n1. Check the logs of the `crapi-workshop` container for specific error messages. Examine the provided log file for any relevant details.\n2. Verify network configurations and ensure that all necessary ports are open for communication between containers in the Internal Network setup.\n3. Review the Docker Compose file for any environment variables or services that might be affected by the network mode change.\n4. Test if other containers can communicate properly in the new network setup to rule out broader networking issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/172", @@ -7297,6 +7552,7 @@ "node_id": "I_kwDOFAM8Ps5fAZhY", "title": "Correct password requirements", "body": "# Description\r\n\r\nThe guide to create a password at the sign up says \"Password should contain at least one digit, one small letter and one capital letter and should at least contain 8 characters.\", however, there is one requirement that is not described here. The password must have an special character.\r\n\r\n## To Reproduce\r\n\r\nJust go to the sign up page and type a valid password according to the description such as \"1234567qW\", but the password will be invalid, then try adding a special character like \"1234567qW@\", and the password will be accepted.\r\n\r\n### Expected behavior\r\n\r\nExpected that the password description would be correct, like \"Password should contain at least one digit, one special charater, one small letter and one capital letter and should at least contain 8 characters.\"\r\n\r\n\r\n#### Runtime Environment\r\n\r\nDocker version 20.10.13 on Ubuntu 20 x86_64\r\n", + "summary": "Update password requirements in the sign-up guide. Ensure the description clearly states that a password must include at least one special character, in addition to one digit, one lowercase letter, one uppercase letter, and a minimum of 8 characters.\n\n### Steps to Address:\n\n1. Review the current sign-up page for the password requirements text.\n2. Modify the password requirements section to include special character stipulation.\n3. Test the sign-up functionality with various password combinations to confirm that the validation logic aligns with the updated requirements.\n4. Update any related documentation or user guides to reflect the new password requirements.\n5. Deploy the changes and verify on the staging environment before pushing to production.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/173", @@ -7325,6 +7581,7 @@ "node_id": "I_kwDOFAM8Ps5fNAX4", "title": "crapi-workshop is unhealthy", "body": "![image](https://user-images.githubusercontent.com/95041015/220981582-28b25ffa-93d7-4792-be9a-a27eccd3284f.png)", + "summary": "Investigate the unhealthy status of the crapi-workshop. Examine the provided image for error messages or indicators of failure. \n\nCheck the health checks configured for the service and verify if they are correctly set up. 
Review the logs for any recent errors or anomalies that could point to the root cause of the issue.\n\nEnsure that all required services and dependencies are running and accessible. Check for resource constraints such as memory or CPU limits that may be affecting the service's performance. \n\nStart by:\n1. Reviewing the health check configuration and logs.\n2. Verifying service dependencies are operational.\n3. Monitoring resource usage to identify potential bottlenecks. \n\nDocument findings and propose fixes based on the gathered information.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/175", @@ -7353,6 +7610,7 @@ "node_id": "I_kwDOFAM8Ps5fUmBQ", "title": "localhost:8888 is unreachable", "body": "I can't reach localhost:8888 but I can reach localhost:8025. what is the solution to this problem?\r\n\r\nAlso when I checked the health of the containers, I am seeing that crapi identity is exited 1 minute ago\r\nIs there anyone who experienced this trouble?\r\n\r\nI checked the crapi-identity container's logs and the first issue is PostgreSQL FATAL: database \"crapi\" does not exist.\r\nIn addition: I have got 3 containers that are alive.\r\n", + "summary": "Check connectivity to localhost:8888. Since localhost:8025 is accessible, investigate configuration settings related to the service running on port 8888.\n\nExamine the crapi-identity container, which has exited recently. Review logs to confirm the issue: \"PostgreSQL FATAL: database 'crapi' does not exist.\" \n\nTo tackle the problem:\n\n1. **Verify Database Creation**: Ensure that the PostgreSQL database named \"crapi\" is created. If it doesn't exist, create it using the appropriate SQL command or configuration.\n\n2. **Inspect Container Networking**: Confirm that the crapi-identity container is correctly configured to connect to PostgreSQL. Check environment variables and service links.\n\n3. **Check Docker Compose Configuration**: Review your Docker Compose file (if applicable) for any misconfigurations related to the crapi-identity service and PostgreSQL service.\n\n4. **Restart Containers**: After confirming the database exists and configurations are correct, restart the containers.\n\n5. **Monitor Logs**: Continuously monitor the logs of crapi-identity and PostgreSQL to ensure all services are functioning properly.\n\n6. 
**Test the Endpoint**: After addressing the database issue, test the localhost:8888 endpoint again to verify accessibility.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/176", @@ -7381,6 +7639,7 @@ "node_id": "I_kwDOFAM8Ps5gA2xK", "title": "crapi-web Container is unhealthy", "body": "crapi-web log:\r\nCreating test database for alias 'default'...\r\nCreating test database for alias 'mongodb'...\r\nroot ERROR /workshop/api/mechanic/signup - {'name': 'MechRaju', 'email': 'mechraju@crapi.com', 'mechanic_code': 'TRAC_MEC_3', 'number': '9123456708'} - 400 -{'password': [ErrorDetail(string='This field is required.', code='required')]}\r\ndjango.request WARNING Bad Request: /workshop/api/mechanic/signup\r\n.django.request WARNING Bad Request: /workshop/api/mechanic/signup\r\n.django.request WARNING Bad Request: /workshop/api/mechanic/signup\r\n.django.request WARNING Unauthorized: /workshop/api/mechanic/\r\ndjango.request WARNING Unauthorized: /workshop/api/mechanic/\r\n.django.request WARNING Unauthorized: /workshop/api/mechanic/\r\n..root ERROR /workshop/api/merchant/contact_mechanic - {'mechanic_api': 'https://www.google.com', 'number_of_repeats': 5, 'mechanic_code': 'TRAC_MEC_3', 'vin': '9NFXO86WBWA082766', 'problem_details': 'My Car is not working'} - 400 -{'repeat_request_if_failed': [ErrorDetail(string='This field is required.', code='required')]}\r\ndjango.request WARNING Bad Request: /workshop/api/merchant/contact_mechanic\r\n.root INFO Repeat count: 0\r\nroot INFO Repeat count: 1\r\nroot INFO Repeat count: 2\r\nroot INFO Repeat count: 3\r\nroot INFO Repeat count: 4\r\nroot INFO Repeat count: 5\r\ndjango.request WARNING Bad Request: /workshop/api/merchant/contact_mechanic\r\n....................\r\n\r\nRuntime Environment\r\nDocker version 20.10.12, build 20.10.12-0ubuntu4\r\ndocker-compose version 1.29.2, build unknown\r\nubuntu-desktop 22.04 amd64 \r\n", + "summary": "Investigate the unhealthy state of the crapi-web container. Analyze logs indicating multiple Bad Request (400) errors for the `/workshop/api/mechanic/signup` and `/workshop/api/merchant/contact_mechanic` endpoints. \n\nIdentify the missing required fields in the requests:\n- For signup, ensure the `password` field is included in the payload.\n- For contact mechanic, ensure `repeat_request_if_failed` is present.\n\nCheck the server's response to unauthorized access attempts, suggesting potential issues with authentication or token validation for the `/workshop/api/mechanic/` endpoint.\n\nExamine the Docker and docker-compose versions for compatibility with the application. \n\nPossible first steps:\n1. Modify the request payloads to include the missing required fields.\n2. Verify the authentication mechanism and ensure valid tokens are being used.\n3. Restart the crapi-web container and monitor logs for changes in health status.\n4. Test the API endpoints manually or with a tool like Postman to confirm they respond correctly with the expected payloads.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/178", @@ -7409,6 +7668,7 @@ "node_id": "I_kwDOFAM8Ps5iA6Ep", "title": "Can't reach localhost:8888 after suspending my virtual machine", "body": "**Describe the bug**\r\nI successfully downloaded crAPI and it was running on my Kali virtual machine but when I suspended my host and started it again localhost:8888 was not opening. 
It kept on loading.\r\n\r\n**Expected behavior**\r\nIt should load every time i try opening 127.0.0.1:8888 \r\n\r\n **Environment**\r\nSytem/Environemnt information \r\n\r\ndocker-compose version 1.29.2, built unknown \r\ndocker-py version : 5.0.3\r\nCPython version : 3.11.2\r\nOpenSSL version : OpenSSL 3.0.8 7 Feb 2023\r\nOperating system : Kali Linux Debian 6.1.20-kali1", + "summary": "- Investigate the issue of not reaching localhost:8888 after suspending and resuming the virtual machine.\n- Ensure that the crAPI service is still running in the Docker container after the VM resumes. \n- Check the status of the Docker containers by running `docker ps` to confirm that the crAPI container is active.\n- If the crAPI container is not running, restart it using `docker-compose up -d` from the directory containing the docker-compose.yml file.\n- Verify that the port 8888 is correctly mapped in the docker-compose.yml file.\n- If the service is running but still inaccessible, check for any network issues or firewall settings that may be affecting the connection.\n- Consider restarting Docker service with `sudo systemctl restart docker` to reset any network configurations.\n- Monitor the Docker logs for any errors using `docker-compose logs` to identify potential issues.\n- Test accessing crAPI using `curl http://127.0.0.1:8888` from the terminal to isolate the problem further. \n- If issues persist, consider reconfiguring the network settings of the VM or Docker to ensure proper communication.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/182", @@ -7437,6 +7697,7 @@ "node_id": "I_kwDOFAM8Ps5jEoZ7", "title": "Coupon and Coupons are different collections ?", "body": "https://github.com/OWASP/crAPI/blob/df1be10efa127a9cf1e0f9479836bedc980b9f36/services/community/api/models/coupon.go#L61\r\n\r\nDoes it intentional typo or not, because I can create a coupon but can't use that.\r\n\r\nPay attention on the validate coupon func and a collection name that used.\r\n\r\nI recommend to use constants for table/collection names if you'll pass this manually in every calls.", + "summary": "Clarify whether \"Coupon\" and \"Coupons\" represent different collections in the codebase. Check the implementation of the `validate coupon` function and the collection name utilized within it to understand the discrepancy. Investigate if the inability to use a created coupon is due to a naming inconsistency.\n\nTo tackle this issue:\n\n1. Review the definition of both \"Coupon\" and \"Coupons\" in the `coupon.go` file.\n2. Examine the `validate coupon` function for how it references the collection names.\n3. Test the creation and validation of a coupon to reproduce the issue.\n4. Propose the implementation of constants for collection/table names to ensure consistency across the codebase. This will help prevent similar issues in the future.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/186", @@ -7463,6 +7724,7 @@ "node_id": "I_kwDOFAM8Ps5mQX4w", "title": "Traceback (most recent call last): crapi-workshop | File \"/app/crapi/merchant/tests.py\", line 134, in test_contact_mechanic", "body": "Hi, I install by docker,but find a bug in startuping! 
\r\n\r\n![Snipaste_2023-05-18_20-32-02](https://github.com/OWASP/crAPI/assets/68784291/11ca10a0-9487-4eb3-82e2-8cdb2ca53a69)\r\n\r\nThe information in this screenshot appears several times and may be helpful for you to debug.\r\n\r\n![Snipaste_2023-05-18_20-29-05](https://github.com/OWASP/crAPI/assets/68784291/598bde63-c409-4208-bc19-d12cefcb34bb)\r\n\r\n\r\n\r\n", + "summary": "Investigate the bug occurring during Docker startup for the crAPI project. The traceback indicates an issue in the `test_contact_mechanic` function located in `merchant/tests.py` at line 134. Review the error messages shown in the provided screenshots, as they may contain critical information for debugging.\n\nFirst steps to tackle the problem:\n\n1. Reproduce the issue by running the Docker container and executing the relevant tests.\n2. Analyze the traceback to identify the root cause of the failure in the `test_contact_mechanic` function.\n3. Check for any missing dependencies or configuration issues in the Docker setup that could lead to this error.\n4. Review recent changes in the codebase that might have affected the functionality being tested.\n5. Consult the documentation or community forums for similar issues encountered by other users.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/200", @@ -7491,6 +7753,7 @@ "node_id": "I_kwDOFAM8Ps5nM_ig", "title": " Install crapi for the first time(ERROR: for crapi-web Container \"28eac0bf7569\" is unhealthy.)", "body": "**Describe the bug**\r\n\r\n```\r\n$: sudo docker-compose -f docker-compose.yml --compatibility up -d\r\nmongodb is up-to-date\r\nmailhog is up-to-date\r\napi.crapi.io is up-to-date\r\npostgresdb is up-to-date\r\ncrapi-identity is up-to-date\r\ncrapi-community is up-to-date\r\nStarting crapi-workshop ... done\r\n\r\nERROR: for crapi-web Container \"28eac0bf7569\" is unhealthy.\r\nERROR: Encountered errors while bringing up the project.\r\n\r\n```\r\n\r\n**Runtime Environment**\r\n \r\n\r\n$: uname -a\r\n```\r\nLinux wg-virtual-machine 5.19.0-41-generic #42~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue Apr 18 17:40:00 UTC 2 x86_64 x86_64 x86_64 GNU/Linux\r\n```\r\n\r\n```\r\n$: sudo docker version\r\n\r\nClient: Docker Engine - Community\r\n Version: 24.0.2\r\n API version: 1.43\r\n Go version: go1.20.4\r\n Git commit: cb74dfc\r\n Built: Thu May 25 21:51:00 2023\r\n OS/Arch: linux/amd64\r\n Context: default\r\n\r\nServer: Docker Engine - Community\r\n Engine:\r\n Version: 24.0.2\r\n API version: 1.43 (minimum version 1.12)\r\n Go version: go1.20.4\r\n Git commit: 659604f\r\n Built: Thu May 25 21:51:00 2023\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n containerd:\r\n Version: 1.6.21\r\n GitCommit: 3dce8eb055cbb6872793272b4f20ed16117344f8\r\n runc:\r\n Version: 1.1.7\r\n GitCommit: v1.1.7-0-g860f061\r\n docker-init:\r\n Version: 0.19.0\r\n```\r\n\r\n```\r\nsudo docker-compose version\r\n\r\ndocker-compose version 1.29.2, build unknown\r\ndocker-py version: 5.0.3\r\nCPython version: 3.10.6\r\n```\r\n\r\n", + "summary": "Investigate the \"unhealthy\" status of the `crapi-web` container during the initial installation of crapi. Perform the following steps:\n\n1. **Check Container Logs**: Run `sudo docker logs crapi-web` to examine the logs for any errors or issues that may indicate why the container is unhealthy.\n\n2. **Inspect Health Check Configuration**: Review the Dockerfile or docker-compose.yml for `crapi-web` to identify the health check command and ensure it is correctly configured. \n\n3. 
**Validate Dependencies**: Ensure all dependencies and services required by `crapi-web` are running correctly. Confirm the statuses of `mongodb`, `postgresdb`, and other related services.\n\n4. **Increase Health Check Timeout**: If the health check command requires more time to respond, adjust the timeout settings in the health check configuration.\n\n5. **Resource Allocation**: Verify that the system has sufficient resources (CPU, memory) allocated to Docker. Use `docker stats` to monitor resource usage.\n\n6. **Network Configuration**: Ensure that there are no network-related issues preventing `crapi-web` from communicating with its dependencies.\n\n7. **Update Docker and Docker Compose**: Ensure that you are running the latest stable versions of Docker and Docker Compose. Upgrade if necessary.\n\n8. **Rebuild the Image**: If the problem persists, consider rebuilding the `crapi-web` image using `sudo docker-compose build crapi-web` and then restart the services.\n\n9. **Consult Documentation**: Refer to the official crapi documentation or GitHub repository for any specific troubleshooting tips related to the `crapi-web` container.\n\nBy following these steps, you should be able to diagnose and potentially resolve the \"unhealthy\" status of the `crapi-web` container.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/204", @@ -7519,6 +7782,7 @@ "node_id": "I_kwDOFAM8Ps5oDh58", "title": "Container Mongodb error ", "body": "this is a result of an error that I have been facing for 2 days now and if the mongodb is healhty the crapi_community shows an error \r\nI have just to run this command and wait which one gonna show an error \r\n$sudo VERSION=develop docker-compose -f docker-compose.yml --compatibility up -d[+] Building 0.0s (0/0) \r\n[+] Running 6/6\r\n ✔ Container api.crapi.io Running 0.0s \r\n ✔ Container postgresdb Healthy 0.5s \r\n ✘ Container mongodb Error 13.6s \r\n ✔ Container mailhog Running 0.0s \r\n ✔ Container crapi-identity Created 0.0s \r\n ✔ Container crapi-community Created\r\n\r\n\r\n\r\n\r\n\r\ndocker logs mongo db\r\n\r\nWARNING: MongoDB 5.0+ requires a CPU with AVX support, and your current system does not appear to have that!\r\n see https://jira.mongodb.org/browse/SERVER-54407\r\n see also https://www.mongodb.com/community/forums/t/mongodb-5-0-cpu-intel-g4650-compatibility/116610/2\r\n see also https://github.com/docker-library/mongo/issues/485#issuecomment-891991814\r\n\r\nabout to fork child process, waiting until server is ready for connections.\r\nforked process: 30\r\n\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.470+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":20698, \"ctx\":\"main\",\"msg\":\"***** SERVER RESTARTED *****\"}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.472+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23285, \"ctx\":\"main\",\"msg\":\"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\"}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.474+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4648601, \"ctx\":\"main\",\"msg\":\"Implicit TCP FastOpen unavailable. 
If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize.\"}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.475+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4615611, \"ctx\":\"initandlisten\",\"msg\":\"MongoDB starting\",\"attr\":{\"pid\":30,\"port\":27017,\"dbPath\":\"/data/db\",\"architecture\":\"64-bit\",\"host\":\"a636cf2cec8a\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.475+00:00\"},\"s\":\"W\", \"c\":\"CONTROL\", \"id\":20720, \"ctx\":\"initandlisten\",\"msg\":\"Available memory is less than system memory\",\"attr\":{\"availableMemSizeMB\":128,\"systemMemSizeMB\":3927}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.475+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23403, \"ctx\":\"initandlisten\",\"msg\":\"Build Info\",\"attr\":{\"buildInfo\":{\"version\":\"4.4.22\",\"gitVersion\":\"fc832685b99221cffb1f5bb5a4ff5ad3e1c416b2\",\"openSSLVersion\":\"OpenSSL 1.1.1f 31 Mar 2020\",\"modules\":[],\"allocator\":\"tcmalloc\",\"environment\":{\"distmod\":\"ubuntu2004\",\"distarch\":\"x86_64\",\"target_arch\":\"x86_64\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.475+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":51765, \"ctx\":\"initandlisten\",\"msg\":\"Operating System\",\"attr\":{\"os\":{\"name\":\"Ubuntu\",\"version\":\"20.04\"}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.475+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":21951, \"ctx\":\"initandlisten\",\"msg\":\"Options set by command line\",\"attr\":{\"options\":{\"net\":{\"bindIp\":\"127.0.0.1\",\"port\":27017,\"tls\":{\"mode\":\"disabled\"}},\"processManagement\":{\"fork\":true,\"pidFilePath\":\"/tmp/docker-entrypoint-temp-mongod.pid\"},\"systemLog\":{\"destination\":\"file\",\"logAppend\":true,\"path\":\"/proc/1/fd/1\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.476+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22300, \"ctx\":\"initandlisten\",\"msg\":\"The configured WiredTiger cache size is more than 80% of available RAM. 
See http://dochub.mongodb.org/core/faq-memory-diagnostics-wt\",\"tags\":[\"startupWarnings\"]}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:15.476+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22315, \"ctx\":\"initandlisten\",\"msg\":\"Opening WiredTiger\",\"attr\":{\"config\":\"create,cache_size=256M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress,compact_progress],\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.241+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686132736:241776][30:0x7fc634e35cc0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global recovery timestamp: (0, 0)\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.241+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686132736:241884][30:0x7fc634e35cc0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global oldest timestamp: (0, 0)\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.259+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4795906, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger opened\",\"attr\":{\"durationMillis\":783}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.259+00:00\"},\"s\":\"I\", \"c\":\"RECOVERY\", \"id\":23987, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger recoveryTimestamp\",\"attr\":{\"recoveryTimestamp\":{\"$timestamp\":{\"t\":0,\"i\":0}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.341+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22262, \"ctx\":\"initandlisten\",\"msg\":\"Timestamp monitor starting\"}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.357+00:00\"},\"s\":\"W\", \"c\":\"CONTROL\", \"id\":22120, \"ctx\":\"initandlisten\",\"msg\":\"Access control is not enabled for the database. Read and write access to data and configuration is unrestricted\",\"tags\":[\"startupWarnings\"]}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.357+00:00\"},\"s\":\"W\", \"c\":\"CONTROL\", \"id\":22178, \"ctx\":\"initandlisten\",\"msg\":\"/sys/kernel/mm/transparent_hugepage/enabled is 'always'. 
We suggest setting it to 'never'\",\"tags\":[\"startupWarnings\"]}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.359+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":20320, \"ctx\":\"initandlisten\",\"msg\":\"createCollection\",\"attr\":{\"namespace\":\"admin.system.version\",\"uuidDisposition\":\"provided\",\"uuid\":{\"uuid\":{\"$uuid\":\"6963ed1a-f25c-4956-a0c4-19f00a0e8fd1\"}},\"options\":{\"uuid\":{\"$uuid\":\"6963ed1a-f25c-4956-a0c4-19f00a0e8fd1\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.788+00:00\"},\"s\":\"I\", \"c\":\"INDEX\", \"id\":20345, \"ctx\":\"initandlisten\",\"msg\":\"Index build: done building\",\"attr\":{\"buildUUID\":null,\"namespace\":\"admin.system.version\",\"index\":\"_id_\",\"commitTimestamp\":{\"$timestamp\":{\"t\":0,\"i\":0}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.788+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":20459, \"ctx\":\"initandlisten\",\"msg\":\"Setting featureCompatibilityVersion\",\"attr\":{\"newVersion\":\"4.4\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.788+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":20536, \"ctx\":\"initandlisten\",\"msg\":\"Flow Control is enabled on this deployment\"}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.790+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":20320, \"ctx\":\"initandlisten\",\"msg\":\"createCollection\",\"attr\":{\"namespace\":\"local.startup_log\",\"uuidDisposition\":\"generated\",\"uuid\":{\"uuid\":{\"$uuid\":\"f0272e6b-f2fc-40ed-990f-3957e3e132eb\"}},\"options\":{\"capped\":true,\"size\":10485760}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.830+00:00\"},\"s\":\"I\", \"c\":\"INDEX\", \"id\":20345, \"ctx\":\"initandlisten\",\"msg\":\"Index build: done building\",\"attr\":{\"buildUUID\":null,\"namespace\":\"local.startup_log\",\"index\":\"_id_\",\"commitTimestamp\":{\"$timestamp\":{\"t\":0,\"i\":0}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.831+00:00\"},\"s\":\"I\", \"c\":\"FTDC\", \"id\":20625, \"ctx\":\"initandlisten\",\"msg\":\"Initializing full-time diagnostic data capture\",\"attr\":{\"dataDirectory\":\"/data/db/diagnostic.data\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.831+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":6015317, \"ctx\":\"initandlisten\",\"msg\":\"Setting new configuration state\",\"attr\":{\"newState\":\"ConfigReplicationDisabled\",\"oldState\":\"ConfigPreStart\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.897+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":23015, \"ctx\":\"listener\",\"msg\":\"Listening on\",\"attr\":{\"address\":\"/tmp/mongodb-27017.sock\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.897+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":23015, \"ctx\":\"listener\",\"msg\":\"Listening on\",\"attr\":{\"address\":\"127.0.0.1\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:16.897+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":23016, \"ctx\":\"listener\",\"msg\":\"Waiting for connections\",\"attr\":{\"port\":27017,\"ssl\":\"off\"}}\r\nchild process started successfully, parent exiting\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:17.574+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"LogicalSessionCacheRefresh\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"config.system.sessions\",\"command\":{\"listIndexes\":\"system.sessions\",\"cursor\":{},\"$db\":\"config\"},\"numYields\":0,\"ok\":0,\"errMsg\":\"ns does not exist: 
config.system.sessions\",\"errName\":\"NamespaceNotFound\",\"errCode\":26,\"reslen\":134,\"locks\":{\"FeatureCompatibilityVersion\":{\"acquireCount\":{\"r\":1}},\"ReplicationStateTransition\":{\"acquireCount\":{\"w\":1}},\"Global\":{\"acquireCount\":{\"r\":1}},\"Database\":{\"acquireCount\":{\"r\":1}},\"Collection\":{\"acquireCount\":{\"r\":1}},\"Mutex\":{\"acquireCount\":{\"r\":1}}},\"protocol\":\"op_msg\",\"durationMillis\":677}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:17.581+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"LogicalSessionCacheReap\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"config.system.sessions\",\"command\":{\"listIndexes\":\"system.sessions\",\"cursor\":{},\"$db\":\"config\"},\"numYields\":0,\"ok\":0,\"errMsg\":\"ns does not exist: config.system.sessions\",\"errName\":\"NamespaceNotFound\",\"errCode\":26,\"reslen\":134,\"locks\":{\"FeatureCompatibilityVersion\":{\"acquireCount\":{\"r\":1}},\"ReplicationStateTransition\":{\"acquireCount\":{\"w\":1}},\"Global\":{\"acquireCount\":{\"r\":1}},\"Database\":{\"acquireCount\":{\"r\":1}},\"Collection\":{\"acquireCount\":{\"r\":1}},\"Mutex\":{\"acquireCount\":{\"r\":1}}},\"protocol\":\"op_msg\",\"durationMillis\":684}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:17.668+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":20320, \"ctx\":\"LogicalSessionCacheRefresh\",\"msg\":\"createCollection\",\"attr\":{\"namespace\":\"config.system.sessions\",\"uuidDisposition\":\"generated\",\"uuid\":{\"uuid\":{\"$uuid\":\"611d3916-8a82-4f2e-9c68-b54e0a1481fb\"}},\"options\":{}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:17.887+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":20712, \"ctx\":\"LogicalSessionCacheReap\",\"msg\":\"Sessions collection is not set up; waiting until next sessions reap interval\",\"attr\":{\"error\":\"NamespaceNotFound: config.system.sessions does not exist\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:18.672+00:00\"},\"s\":\"I\", \"c\":\"INDEX\", \"id\":20345, \"ctx\":\"LogicalSessionCacheRefresh\",\"msg\":\"Index build: done building\",\"attr\":{\"buildUUID\":null,\"namespace\":\"config.system.sessions\",\"index\":\"_id_\",\"commitTimestamp\":{\"$timestamp\":{\"t\":0,\"i\":0}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:18.672+00:00\"},\"s\":\"I\", \"c\":\"INDEX\", \"id\":20345, \"ctx\":\"LogicalSessionCacheRefresh\",\"msg\":\"Index build: done building\",\"attr\":{\"buildUUID\":null,\"namespace\":\"config.system.sessions\",\"index\":\"lsidTTLIndex\",\"commitTimestamp\":{\"$timestamp\":{\"t\":0,\"i\":0}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:18.672+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"LogicalSessionCacheRefresh\",\"msg\":\"Slow 
query\",\"attr\":{\"type\":\"command\",\"ns\":\"config.system.sessions\",\"command\":{\"createIndexes\":\"system.sessions\",\"indexes\":[{\"key\":{\"lastUse\":1},\"name\":\"lsidTTLIndex\",\"expireAfterSeconds\":1800}],\"writeConcern\":{},\"$db\":\"config\"},\"numYields\":0,\"reslen\":114,\"locks\":{\"ParallelBatchWriterMode\":{\"acquireCount\":{\"r\":5}},\"FeatureCompatibilityVersion\":{\"acquireCount\":{\"r\":2,\"w\":3}},\"ReplicationStateTransition\":{\"acquireCount\":{\"w\":5}},\"Global\":{\"acquireCount\":{\"r\":2,\"w\":3}},\"Database\":{\"acquireCount\":{\"r\":2,\"w\":3}},\"Collection\":{\"acquireCount\":{\"r\":3,\"w\":2}},\"Mutex\":{\"acquireCount\":{\"r\":6}}},\"flowControl\":{\"acquireCount\":1,\"timeAcquiringMicros\":2},\"storage\":{},\"protocol\":\"op_msg\",\"durationMillis\":1004}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:28.466+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"127.0.0.1:45044\",\"connectionId\":1,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:33.367+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn1\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"127.0.0.1:45044\",\"client\":\"conn1\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:36.472+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn1\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"admin.$cmd\",\"appName\":\"MongoDB Shell\",\"command\":{\"whatsmyuri\":1,\"$db\":\"admin\"},\"numYields\":0,\"reslen\":63,\"locks\":{},\"protocol\":\"op_msg\",\"durationMillis\":253}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:41.278+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn1\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"admin.$cmd\",\"appName\":\"MongoDB Shell\",\"command\":{\"buildinfo\":1.0,\"$db\":\"admin\"},\"numYields\":0,\"reslen\":1811,\"locks\":{},\"protocol\":\"op_msg\",\"durationMillis\":2311}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:12:46.870+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn1\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"127.0.0.1:45044\",\"connectionId\":1,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:13:37.506+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686132815:869508][30:0x7fc62de26700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 34, snapshot max: 34 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 1\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:13:37.486+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":23099, \"ctx\":\"PeriodicTaskRunner\",\"msg\":\"Task finished\",\"attr\":{\"taskName\":\"DBConnectionPool-cleaner\",\"durationMillis\":8031}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:13:57.468+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":23099, \"ctx\":\"PeriodicTaskRunner\",\"msg\":\"Task finished\",\"attr\":{\"taskName\":\"DBConnectionPool-cleaner\",\"durationMillis\":4043}}\r\n{\"t\":{\"$date\":\"2023-06-07T10:15:36.268+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":23099, \"ctx\":\"PeriodicTaskRunner\",\"msg\":\"Task 
finished\",\"attr\":{\"taskName\":\"DBConnectionPool-cleaner\",\"durationMillis\":14701}}\r\n/usr/local/bin/docker-entrypoint.sh: line 416: 81 Killed \"${mongo[@]}\" \"$rootAuthDatabase\" <<-EOJS\r\ndb.createUser({\r\nuser: $(_js_escape \"$MONGO_INITDB_ROOT_USERNAME\"),\r\npwd: $(_js_escape \"$MONGO_INITDB_ROOT_PASSWORD\"),\r\nroles: [ { role: 'root', db: $(_js_escape \"$rootAuthDatabase\") } ]\r\n})\r\nEOJS\r\n\r\n\r\nWARNING: MongoDB 5.0+ requires a CPU with AVX support, and your current system does not appear to have that!\r\n see https://jira.mongodb.org/browse/SERVER-54407\r\n see also https://www.mongodb.com/community/forums/t/mongodb-5-0-cpu-intel-g4650-compatibility/116610/2\r\n see also https://github.com/docker-library/mongo/issues/485#issuecomment-891991814\r\n\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.700+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23285, \"ctx\":\"main\",\"msg\":\"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\"}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.703+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4648601, \"ctx\":\"main\",\"msg\":\"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize.\"}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.704+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4615611, \"ctx\":\"initandlisten\",\"msg\":\"MongoDB starting\",\"attr\":{\"pid\":1,\"port\":27017,\"dbPath\":\"/data/db\",\"architecture\":\"64-bit\",\"host\":\"a636cf2cec8a\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.704+00:00\"},\"s\":\"W\", \"c\":\"CONTROL\", \"id\":20720, \"ctx\":\"initandlisten\",\"msg\":\"Available memory is less than system memory\",\"attr\":{\"availableMemSizeMB\":128,\"systemMemSizeMB\":3927}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.704+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23403, \"ctx\":\"initandlisten\",\"msg\":\"Build Info\",\"attr\":{\"buildInfo\":{\"version\":\"4.4.22\",\"gitVersion\":\"fc832685b99221cffb1f5bb5a4ff5ad3e1c416b2\",\"openSSLVersion\":\"OpenSSL 1.1.1f 31 Mar 2020\",\"modules\":[],\"allocator\":\"tcmalloc\",\"environment\":{\"distmod\":\"ubuntu2004\",\"distarch\":\"x86_64\",\"target_arch\":\"x86_64\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.704+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":51765, \"ctx\":\"initandlisten\",\"msg\":\"Operating System\",\"attr\":{\"os\":{\"name\":\"Ubuntu\",\"version\":\"20.04\"}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.704+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":21951, \"ctx\":\"initandlisten\",\"msg\":\"Options set by command line\",\"attr\":{\"options\":{\"net\":{\"bindIp\":\"*\"},\"security\":{\"authorization\":\"enabled\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.706+00:00\"},\"s\":\"W\", \"c\":\"STORAGE\", \"id\":22271, \"ctx\":\"initandlisten\",\"msg\":\"Detected unclean shutdown - Lock file is not empty\",\"attr\":{\"lockFile\":\"/data/db/mongod.lock\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.706+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22270, \"ctx\":\"initandlisten\",\"msg\":\"Storage engine to use detected by data files\",\"attr\":{\"dbpath\":\"/data/db\",\"storageEngine\":\"wiredTiger\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.706+00:00\"},\"s\":\"W\", \"c\":\"STORAGE\", \"id\":22302, \"ctx\":\"initandlisten\",\"msg\":\"Recovering data from the last clean checkpoint.\"}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.706+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22300, 
\"ctx\":\"initandlisten\",\"msg\":\"The configured WiredTiger cache size is more than 80% of available RAM. See http://dochub.mongodb.org/core/faq-memory-diagnostics-wt\",\"tags\":[\"startupWarnings\"]}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:23.706+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22315, \"ctx\":\"initandlisten\",\"msg\":\"Opening WiredTiger\",\"attr\":{\"config\":\"create,cache_size=256M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress,compact_progress],\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:24.147+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135864:147357][1:0x7f0929c1dcc0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 1 through 2\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:24.988+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135864:988764][1:0x7f0929c1dcc0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 2 through 2\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:25.791+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135865:791543][1:0x7f0929c1dcc0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Main recovery loop: starting at 1/21504 to 2/256\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:25.792+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135865:792677][1:0x7f0929c1dcc0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 1 through 2\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:26.874+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135866:874734][1:0x7f0929c1dcc0], file:sizeStorer.wt, txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 2 through 2\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:27.780+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135867:780713][1:0x7f0929c1dcc0], file:sizeStorer.wt, txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global recovery timestamp: (0, 0)\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:27.781+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135867:781022][1:0x7f0929c1dcc0], file:sizeStorer.wt, txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global oldest timestamp: (0, 0)\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:28.079+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135868:79249][1:0x7f0929c1dcc0], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 16, snapshot max: 16 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 7\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:28.160+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4795906, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger 
opened\",\"attr\":{\"durationMillis\":4454}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:28.160+00:00\"},\"s\":\"I\", \"c\":\"RECOVERY\", \"id\":23987, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger recoveryTimestamp\",\"attr\":{\"recoveryTimestamp\":{\"$timestamp\":{\"t\":0,\"i\":0}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:28.669+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22262, \"ctx\":\"initandlisten\",\"msg\":\"Timestamp monitor starting\"}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:28.704+00:00\"},\"s\":\"W\", \"c\":\"CONTROL\", \"id\":22178, \"ctx\":\"initandlisten\",\"msg\":\"/sys/kernel/mm/transparent_hugepage/enabled is 'always'. We suggest setting it to 'never'\",\"tags\":[\"startupWarnings\"]}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:29.372+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":20536, \"ctx\":\"initandlisten\",\"msg\":\"Flow Control is enabled on this deployment\"}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:29.676+00:00\"},\"s\":\"I\", \"c\":\"FTDC\", \"id\":20625, \"ctx\":\"initandlisten\",\"msg\":\"Initializing full-time diagnostic data capture\",\"attr\":{\"dataDirectory\":\"/data/db/diagnostic.data\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:29.681+00:00\"},\"s\":\"I\", \"c\":\"REPL\", \"id\":6015317, \"ctx\":\"initandlisten\",\"msg\":\"Setting new configuration state\",\"attr\":{\"newState\":\"ConfigReplicationDisabled\",\"oldState\":\"ConfigPreStart\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:29.973+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":23015, \"ctx\":\"listener\",\"msg\":\"Listening on\",\"attr\":{\"address\":\"/tmp/mongodb-27017.sock\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:29.974+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":23015, \"ctx\":\"listener\",\"msg\":\"Listening on\",\"attr\":{\"address\":\"0.0.0.0\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:29.974+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":23016, \"ctx\":\"listener\",\"msg\":\"Waiting for connections\",\"attr\":{\"port\":27017,\"ssl\":\"off\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:30.057+00:00\"},\"s\":\"I\", \"c\":\"FTDC\", \"id\":20631, \"ctx\":\"ftdc\",\"msg\":\"Unclean full-time diagnostic data capture shutdown detected, found interim file, some metrics may have been lost\",\"attr\":{\"error\":{\"code\":0,\"codeName\":\"OK\"}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:39.257+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:38144\",\"connectionId\":1,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:39.258+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn1\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:38144\",\"client\":\"conn1\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:40.995+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn1\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:38144\",\"connectionId\":1,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:59.469+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:48658\",\"connectionId\":2,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:59.496+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, 
\"ctx\":\"conn2\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:48658\",\"client\":\"conn2\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:04:59.679+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn2\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:48658\",\"connectionId\":2,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:16.191+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:41614\",\"connectionId\":3,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:16.191+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn3\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:41614\",\"client\":\"conn3\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:16.382+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn3\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:41614\",\"connectionId\":3,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:28.707+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135928:707384][1:0x7f0922c0e700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 18, snapshot max: 18 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 7\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:31.571+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":23099, \"ctx\":\"PeriodicTaskRunner\",\"msg\":\"Task finished\",\"attr\":{\"taskName\":\"DBConnectionPool-cleaner\",\"durationMillis\":1702}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:33.168+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:46880\",\"connectionId\":4,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:33.169+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn4\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:46880\",\"client\":\"conn4\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:33.366+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn4\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:46880\",\"connectionId\":4,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:50.005+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:47756\",\"connectionId\":5,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:50.006+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn5\",\"msg\":\"client 
metadata\",\"attr\":{\"remote\":\"172.18.0.3:47756\",\"client\":\"conn5\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:50.106+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn5\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:47756\",\"connectionId\":5,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:56.926+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.7:34006\",\"connectionId\":6,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:57.585+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn6\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.7:34006\",\"client\":\"conn6\",\"doc\":{\"driver\":{\"name\":\"mongo-go-driver\",\"version\":\"v1.3.5\"},\"os\":{\"type\":\"linux\",\"architecture\":\"amd64\"},\"platform\":\"go1.20.4\"}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:57.601+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.7:34020\",\"connectionId\":7,\"connectionCount\":2}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:57.619+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn7\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.7:34020\",\"client\":\"conn7\",\"doc\":{\"driver\":{\"name\":\"mongo-go-driver\",\"version\":\"v1.3.5\"},\"os\":{\"type\":\"linux\",\"architecture\":\"amd64\"},\"platform\":\"go1.20.4\"}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:05:59.871+00:00\"},\"s\":\"I\", \"c\":\"ACCESS\", \"id\":20251, \"ctx\":\"conn7\",\"msg\":\"Supported SASL mechanisms requested for unknown user\",\"attr\":{\"user\":\"admin@admin\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:01.399+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn7\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"admin.$cmd\",\"command\":{\"isMaster\":1,\"saslSupportedMechs\":\"admin.admin\",\"compression\":[],\"client\":{\"driver\":{\"name\":\"mongo-go-driver\",\"version\":\"v1.3.5\"},\"os\":{\"type\":\"linux\",\"architecture\":\"amd64\"},\"platform\":\"go1.20.4\"},\"$readPreference\":{\"mode\":\"secondaryPreferred\"},\"$db\":\"admin\"},\"numYields\":0,\"reslen\":319,\"locks\":{},\"protocol\":\"op_query\",\"durationMillis\":2792}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:01.779+00:00\"},\"s\":\"I\", \"c\":\"ACCESS\", \"id\":20249, \"ctx\":\"conn7\",\"msg\":\"Authentication failed\",\"attr\":{\"mechanism\":\"SCRAM-SHA-1\",\"speculative\":false,\"principalName\":\"admin\",\"authenticationDatabase\":\"admin\",\"remote\":\"172.18.0.7:34020\",\"extraInfo\":{},\"error\":\"UserNotFound: Could not find user \\\"admin\\\" for db \\\"admin\\\"\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:01.795+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn7\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"admin.$cmd\",\"command\":{\"saslStart\":1,\"mechanism\":\"SCRAM-SHA-1\",\"payload\":\"xxx\",\"$db\":\"admin\"},\"numYields\":0,\"ok\":0,\"errMsg\":\"Authentication failed.\",\"errName\":\"AuthenticationFailed\",\"errCode\":18,\"reslen\":118,\"locks\":{},\"protocol\":\"op_msg\",\"durationMillis\":106}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:01.796+00:00\"},\"s\":\"I\", 
\"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn7\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.7:34020\",\"connectionId\":7,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:01.833+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn6\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.7:34006\",\"connectionId\":6,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:06.303+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:54760\",\"connectionId\":8,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:06.304+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn8\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:54760\",\"client\":\"conn8\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:06.498+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn8\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:54760\",\"connectionId\":8,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:22.268+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:33884\",\"connectionId\":9,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:22.269+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn9\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:33884\",\"client\":\"conn9\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:22.380+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn9\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:33884\",\"connectionId\":9,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:28.764+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686135988:764217][1:0x7f0922c0e700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 21, snapshot max: 21 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 7\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:38.378+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:33064\",\"connectionId\":10,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:38.386+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn10\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:33064\",\"client\":\"conn10\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:38.579+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn10\",\"msg\":\"Connection 
ended\",\"attr\":{\"remote\":\"172.18.0.3:33064\",\"connectionId\":10,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:40.387+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.7:35868\",\"connectionId\":11,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:40.476+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn11\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.7:35868\",\"client\":\"conn11\",\"doc\":{\"driver\":{\"name\":\"mongo-go-driver\",\"version\":\"v1.3.5\"},\"os\":{\"type\":\"linux\",\"architecture\":\"amd64\"},\"platform\":\"go1.20.4\"}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:40.513+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.7:35876\",\"connectionId\":12,\"connectionCount\":2}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:40.513+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn12\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.7:35876\",\"client\":\"conn12\",\"doc\":{\"driver\":{\"name\":\"mongo-go-driver\",\"version\":\"v1.3.5\"},\"os\":{\"type\":\"linux\",\"architecture\":\"amd64\"},\"platform\":\"go1.20.4\"}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:40.524+00:00\"},\"s\":\"I\", \"c\":\"ACCESS\", \"id\":20251, \"ctx\":\"conn12\",\"msg\":\"Supported SASL mechanisms requested for unknown user\",\"attr\":{\"user\":\"admin@admin\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:40.525+00:00\"},\"s\":\"I\", \"c\":\"ACCESS\", \"id\":20249, \"ctx\":\"conn12\",\"msg\":\"Authentication failed\",\"attr\":{\"mechanism\":\"SCRAM-SHA-1\",\"speculative\":false,\"principalName\":\"admin\",\"authenticationDatabase\":\"admin\",\"remote\":\"172.18.0.7:35876\",\"extraInfo\":{},\"error\":\"UserNotFound: Could not find user \\\"admin\\\" for db \\\"admin\\\"\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:40.526+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn12\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.7:35876\",\"connectionId\":12,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:40.567+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn11\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.7:35868\",\"connectionId\":11,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:54.472+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:41320\",\"connectionId\":13,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:54.474+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn13\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:41320\",\"client\":\"conn13\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:06:54.569+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn13\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:41320\",\"connectionId\":13,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:10.569+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection 
accepted\",\"attr\":{\"remote\":\"172.18.0.3:44538\",\"connectionId\":14,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:10.570+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn14\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:44538\",\"client\":\"conn14\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:10.668+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn14\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:44538\",\"connectionId\":14,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:27.069+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:56014\",\"connectionId\":15,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:27.070+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn15\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:56014\",\"client\":\"conn15\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:27.172+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn15\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:56014\",\"connectionId\":15,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:28.818+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686136048:818265][1:0x7f0922c0e700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 23, snapshot max: 23 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 7\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:42.870+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:58318\",\"connectionId\":16,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:42.871+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn16\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:58318\",\"client\":\"conn16\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:42.994+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn16\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:58318\",\"connectionId\":16,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:58.492+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:38162\",\"connectionId\":17,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:58.566+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn17\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:38162\",\"client\":\"conn17\",\"doc\":{\"application\":{\"name\":\"MongoDB 
Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:07:58.778+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn17\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:38162\",\"connectionId\":17,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:08:14.394+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:33518\",\"connectionId\":18,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:08:14.395+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn18\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:33518\",\"client\":\"conn18\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:08:14.484+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn18\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:33518\",\"connectionId\":18,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:08:28.908+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686136108:908044][1:0x7f0922c0e700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 25, snapshot max: 25 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 7\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:08:49.778+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:47224\",\"connectionId\":19,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:08:49.817+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn19\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:47224\",\"client\":\"conn19\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:08:51.268+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn19\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:47224\",\"connectionId\":19,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:11.107+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:47016\",\"connectionId\":20,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:12.077+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn20\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:47016\",\"client\":\"conn20\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:24.584+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn20\",\"msg\":\"Slow 
query\",\"attr\":{\"type\":\"command\",\"ns\":\"admin.$cmd\",\"appName\":\"MongoDB Shell\",\"command\":{\"isMaster\":1,\"client\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}},\"$db\":\"admin\"},\"numYields\":0,\"reslen\":319,\"locks\":{},\"protocol\":\"op_query\",\"durationMillis\":695}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:29.833+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn20\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"test.$cmd\",\"appName\":\"MongoDB Shell\",\"command\":{\"isMaster\":1.0,\"forShell\":1.0,\"$db\":\"test\"},\"numYields\":0,\"reslen\":304,\"locks\":{},\"protocol\":\"op_msg\",\"durationMillis\":239}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:38.679+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":20499, \"ctx\":\"ftdc\",\"msg\":\"serverStatus was very slow\",\"attr\":{\"timeStats\":{\"after basic\":8,\"after asserts\":8,\"after connections\":8,\"after electionMetrics\":8,\"after extra_info\":8,\"after flowControl\":8,\"after globalLock\":118,\"after indexBulkBuilder\":118,\"after locks\":118,\"after logicalSessionRecordCache\":7021,\"after mirroredReads\":7021,\"after network\":7021,\"after opLatencies\":7021,\"after opReadConcernCounters\":7021,\"after opcounters\":7021,\"after opcountersRepl\":7021,\"after oplog\":7021,\"after oplogTruncation\":7119,\"after repl\":7119,\"after scramCache\":7119,\"after security\":7119,\"after storageEngine\":7119,\"after tcmalloc\":7119,\"after trafficRecording\":7119,\"after transactions\":7119,\"after transportSecurity\":7119,\"after twoPhaseCommitCoordinator\":7119,\"after wiredTiger\":7527,\"at end\":8600}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:41.471+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22944, \"ctx\":\"conn20\",\"msg\":\"Connection ended\",\"attr\":{\"remote\":\"172.18.0.3:47016\",\"connectionId\":20,\"connectionCount\":0}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:47.168+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":20499, \"ctx\":\"ftdc\",\"msg\":\"serverStatus was very slow\",\"attr\":{\"timeStats\":{\"after basic\":0,\"after asserts\":0,\"after connections\":0,\"after electionMetrics\":0,\"after extra_info\":423,\"after flowControl\":423,\"after globalLock\":423,\"after indexBulkBuilder\":423,\"after locks\":423,\"after logicalSessionRecordCache\":423,\"after mirroredReads\":423,\"after network\":423,\"after opLatencies\":423,\"after opReadConcernCounters\":423,\"after opcounters\":423,\"after opcountersRepl\":423,\"after oplog\":423,\"after oplogTruncation\":550,\"after repl\":550,\"after scramCache\":550,\"after security\":550,\"after storageEngine\":550,\"after tcmalloc\":550,\"after trafficRecording\":550,\"after transactions\":550,\"after transportSecurity\":550,\"after twoPhaseCommitCoordinator\":550,\"after wiredTiger\":550,\"at end\":1110}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:54.376+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"LogicalSessionCacheReap\",\"msg\":\"Slow 
query\",\"attr\":{\"type\":\"command\",\"ns\":\"config.system.sessions\",\"command\":{\"listIndexes\":\"system.sessions\",\"cursor\":{},\"$db\":\"config\"},\"numYields\":0,\"reslen\":245,\"locks\":{\"FeatureCompatibilityVersion\":{\"acquireCount\":{\"r\":1}},\"ReplicationStateTransition\":{\"acquireCount\":{\"w\":1}},\"Global\":{\"acquireCount\":{\"r\":1}},\"Database\":{\"acquireCount\":{\"r\":1}},\"Collection\":{\"acquireCount\":{\"r\":1}},\"Mutex\":{\"acquireCount\":{\"r\":1}}},\"storage\":{},\"protocol\":\"op_msg\",\"durationMillis\":10399}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:54.675+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686136194:675072][1:0x7f0922c0e700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 27, snapshot max: 27 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 7\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:56.492+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":20499, \"ctx\":\"ftdc\",\"msg\":\"serverStatus was very slow\",\"attr\":{\"timeStats\":{\"after basic\":869,\"after asserts\":869,\"after connections\":869,\"after electionMetrics\":869,\"after extra_info\":1042,\"after flowControl\":1042,\"after globalLock\":1042,\"after indexBulkBuilder\":1042,\"after locks\":1042,\"after logicalSessionRecordCache\":1042,\"after mirroredReads\":1042,\"after network\":1042,\"after opLatencies\":1042,\"after opReadConcernCounters\":1042,\"after opcounters\":1042,\"after opcountersRepl\":1042,\"after oplog\":1042,\"after oplogTruncation\":1237,\"after repl\":1237,\"after scramCache\":1237,\"after security\":1237,\"after storageEngine\":1237,\"after tcmalloc\":1237,\"after trafficRecording\":1237,\"after transactions\":1237,\"after transportSecurity\":1237,\"after twoPhaseCommitCoordinator\":1237,\"after wiredTiger\":1238,\"at end\":1414}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:59.266+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"LogicalSessionCacheRefresh\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"config.system.sessions\",\"command\":{\"listIndexes\":\"system.sessions\",\"cursor\":{},\"$db\":\"config\"},\"numYields\":0,\"reslen\":245,\"locks\":{\"FeatureCompatibilityVersion\":{\"acquireCount\":{\"r\":1}},\"ReplicationStateTransition\":{\"acquireCount\":{\"w\":1}},\"Global\":{\"acquireCount\":{\"r\":1}},\"Database\":{\"acquireCount\":{\"r\":1}},\"Collection\":{\"acquireCount\":{\"r\":1}},\"Mutex\":{\"acquireCount\":{\"r\":1}}},\"storage\":{},\"protocol\":\"op_msg\",\"durationMillis\":15195}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:09:59.614+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"LogicalSessionCacheReap\",\"msg\":\"Slow 
query\",\"attr\":{\"type\":\"command\",\"ns\":\"config.transactions\",\"command\":{\"find\":\"transactions\",\"filter\":{\"lastWriteDate\":{\"$lt\":{\"$date\":\"2023-06-07T10:39:54.489Z\"}}},\"projection\":{\"_id\":1},\"sort\":{\"_id\":1},\"readConcern\":{},\"$db\":\"config\"},\"planSummary\":\"EOF\",\"keysExamined\":0,\"docsExamined\":0,\"cursorExhausted\":true,\"numYields\":0,\"nreturned\":0,\"reslen\":108,\"locks\":{\"FeatureCompatibilityVersion\":{\"acquireCount\":{\"r\":1}},\"ReplicationStateTransition\":{\"acquireCount\":{\"w\":1}},\"Global\":{\"acquireCount\":{\"r\":1}},\"Database\":{\"acquireCount\":{\"r\":1}},\"Collection\":{\"acquireCount\":{\"r\":2}},\"Mutex\":{\"acquireCount\":{\"r\":1}}},\"readConcern\":{\"provenance\":\"clientSupplied\"},\"storage\":{},\"protocol\":\"op_msg\",\"durationMillis\":5113}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:10:31.918+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":20499, \"ctx\":\"ftdc\",\"msg\":\"serverStatus was very slow\",\"attr\":{\"timeStats\":{\"after basic\":201,\"after asserts\":201,\"after connections\":201,\"after electionMetrics\":201,\"after extra_info\":534,\"after flowControl\":534,\"after globalLock\":534,\"after indexBulkBuilder\":534,\"after locks\":534,\"after logicalSessionRecordCache\":534,\"after mirroredReads\":534,\"after network\":534,\"after opLatencies\":534,\"after opReadConcernCounters\":534,\"after opcounters\":534,\"after opcountersRepl\":534,\"after oplog\":534,\"after oplogTruncation\":534,\"after repl\":534,\"after scramCache\":534,\"after security\":534,\"after storageEngine\":534,\"after tcmalloc\":534,\"after trafficRecording\":534,\"after transactions\":534,\"after transportSecurity\":534,\"after twoPhaseCommitCoordinator\":534,\"after wiredTiger\":534,\"at end\":1222}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:10:51.873+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":23099, \"ctx\":\"PeriodicTaskRunner\",\"msg\":\"Task finished\",\"attr\":{\"taskName\":\"DBConnectionPool-cleaner\",\"durationMillis\":4405}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:10:56.066+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:60452\",\"connectionId\":21,\"connectionCount\":1}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:10:57.267+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":22943, \"ctx\":\"listener\",\"msg\":\"Connection accepted\",\"attr\":{\"remote\":\"172.18.0.3:60458\",\"connectionId\":22,\"connectionCount\":2}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:10:58.669+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":23099, \"ctx\":\"PeriodicTaskRunner\",\"msg\":\"Task finished\",\"attr\":{\"taskName\":\"UnusedLockCleaner\",\"durationMillis\":6795}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:11:03.580+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":20499, \"ctx\":\"ftdc\",\"msg\":\"serverStatus was very slow\",\"attr\":{\"timeStats\":{\"after basic\":100,\"after asserts\":100,\"after connections\":100,\"after electionMetrics\":100,\"after extra_info\":403,\"after flowControl\":403,\"after globalLock\":403,\"after indexBulkBuilder\":403,\"after locks\":905,\"after logicalSessionRecordCache\":905,\"after mirroredReads\":905,\"after network\":905,\"after opLatencies\":905,\"after opReadConcernCounters\":905,\"after opcounters\":905,\"after opcountersRepl\":905,\"after oplog\":905,\"after oplogTruncation\":907,\"after repl\":907,\"after scramCache\":907,\"after security\":907,\"after storageEngine\":907,\"after tcmalloc\":907,\"after trafficRecording\":907,\"after 
transactions\":907,\"after transportSecurity\":907,\"after twoPhaseCommitCoordinator\":907,\"after wiredTiger\":907,\"at end\":1313}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:11:05.968+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"WTCheckpointThread\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686136265:968123][1:0x7f0922c0e700], WT_SESSION.checkpoint: [WT_VERB_CHECKPOINT_PROGRESS] saving checkpoint snapshot min: 29, snapshot max: 29 snapshot count: 0, oldest timestamp: (0, 0) , meta checkpoint timestamp: (0, 0) base write gen: 7\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:11:19.069+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":20499, \"ctx\":\"ftdc\",\"msg\":\"serverStatus was very slow\",\"attr\":{\"timeStats\":{\"after basic\":0,\"after asserts\":0,\"after connections\":0,\"after electionMetrics\":0,\"after extra_info\":460,\"after flowControl\":460,\"after globalLock\":460,\"after indexBulkBuilder\":460,\"after locks\":460,\"after logicalSessionRecordCache\":460,\"after mirroredReads\":460,\"after network\":460,\"after opLatencies\":460,\"after opReadConcernCounters\":460,\"after opcounters\":460,\"after opcountersRepl\":460,\"after oplog\":460,\"after oplogTruncation\":466,\"after repl\":466,\"after scramCache\":466,\"after security\":466,\"after storageEngine\":466,\"after tcmalloc\":466,\"after trafficRecording\":466,\"after transactions\":466,\"after transportSecurity\":466,\"after twoPhaseCommitCoordinator\":466,\"after wiredTiger\":466,\"at end\":1062}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:11:14.168+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn21\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:60452\",\"client\":\"conn21\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:11:14.168+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":51800, \"ctx\":\"conn22\",\"msg\":\"client metadata\",\"attr\":{\"remote\":\"172.18.0.3:60458\",\"client\":\"conn22\",\"doc\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:11:42.178+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn22\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"admin.$cmd\",\"appName\":\"MongoDB Shell\",\"command\":{\"isMaster\":1,\"client\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}},\"$db\":\"admin\"},\"numYields\":0,\"reslen\":319,\"locks\":{},\"protocol\":\"op_query\",\"durationMillis\":26393}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:11:42.170+00:00\"},\"s\":\"I\", \"c\":\"COMMAND\", \"id\":51803, \"ctx\":\"conn21\",\"msg\":\"Slow query\",\"attr\":{\"type\":\"command\",\"ns\":\"admin.$cmd\",\"appName\":\"MongoDB Shell\",\"command\":{\"isMaster\":1,\"client\":{\"application\":{\"name\":\"MongoDB Shell\"},\"driver\":{\"name\":\"MongoDB Internal 
Client\",\"version\":\"4.4.22\"},\"os\":{\"type\":\"Linux\",\"name\":\"Ubuntu\",\"architecture\":\"x86_64\",\"version\":\"20.04\"}},\"$db\":\"admin\"},\"numYields\":0,\"reslen\":319,\"locks\":{},\"protocol\":\"op_query\",\"durationMillis\":26999}}\r\n\r\nWARNING: MongoDB 5.0+ requires a CPU with AVX support, and your current system does not appear to have that!\r\n see https://jira.mongodb.org/browse/SERVER-54407\r\n see also https://www.mongodb.com/community/forums/t/mongodb-5-0-cpu-intel-g4650-compatibility/116610/2\r\n see also https://github.com/docker-library/mongo/issues/485#issuecomment-891991814\r\n\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.003+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23285, \"ctx\":\"main\",\"msg\":\"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\"}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.014+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4648601, \"ctx\":\"main\",\"msg\":\"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize.\"}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.016+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":4615611, \"ctx\":\"initandlisten\",\"msg\":\"MongoDB starting\",\"attr\":{\"pid\":1,\"port\":27017,\"dbPath\":\"/data/db\",\"architecture\":\"64-bit\",\"host\":\"a636cf2cec8a\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.021+00:00\"},\"s\":\"W\", \"c\":\"CONTROL\", \"id\":20720, \"ctx\":\"initandlisten\",\"msg\":\"Available memory is less than system memory\",\"attr\":{\"availableMemSizeMB\":128,\"systemMemSizeMB\":3927}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.021+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":23403, \"ctx\":\"initandlisten\",\"msg\":\"Build Info\",\"attr\":{\"buildInfo\":{\"version\":\"4.4.22\",\"gitVersion\":\"fc832685b99221cffb1f5bb5a4ff5ad3e1c416b2\",\"openSSLVersion\":\"OpenSSL 1.1.1f 31 Mar 2020\",\"modules\":[],\"allocator\":\"tcmalloc\",\"environment\":{\"distmod\":\"ubuntu2004\",\"distarch\":\"x86_64\",\"target_arch\":\"x86_64\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.021+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":51765, \"ctx\":\"initandlisten\",\"msg\":\"Operating System\",\"attr\":{\"os\":{\"name\":\"Ubuntu\",\"version\":\"20.04\"}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.021+00:00\"},\"s\":\"I\", \"c\":\"CONTROL\", \"id\":21951, \"ctx\":\"initandlisten\",\"msg\":\"Options set by command line\",\"attr\":{\"options\":{\"net\":{\"bindIp\":\"*\"},\"security\":{\"authorization\":\"enabled\"}}}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.023+00:00\"},\"s\":\"W\", \"c\":\"STORAGE\", \"id\":22271, \"ctx\":\"initandlisten\",\"msg\":\"Detected unclean shutdown - Lock file is not empty\",\"attr\":{\"lockFile\":\"/data/db/mongod.lock\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.023+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22270, \"ctx\":\"initandlisten\",\"msg\":\"Storage engine to use detected by data files\",\"attr\":{\"dbpath\":\"/data/db\",\"storageEngine\":\"wiredTiger\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.023+00:00\"},\"s\":\"W\", \"c\":\"STORAGE\", \"id\":22302, \"ctx\":\"initandlisten\",\"msg\":\"Recovering data from the last clean checkpoint.\"}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.023+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22300, \"ctx\":\"initandlisten\",\"msg\":\"The configured WiredTiger cache size is more than 80% of available RAM. 
See http://dochub.mongodb.org/core/faq-memory-diagnostics-wt\",\"tags\":[\"startupWarnings\"]}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:17.024+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22315, \"ctx\":\"initandlisten\",\"msg\":\"Opening WiredTiger\",\"attr\":{\"config\":\"create,cache_size=256M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress,compact_progress],\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:20.997+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686138740:997479][1:0x7f75326efcc0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 2 through 3\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:30.394+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686138750:394043][1:0x7f75326efcc0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 3 through 3\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:42.413+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686138762:413345][1:0x7f75326efcc0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Main recovery loop: starting at 2/8192 to 3/256\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:52:43.283+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686138763:283850][1:0x7f75326efcc0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 2 through 3\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:53:01.955+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686138781:955463][1:0x7f75326efcc0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 3 through 3\"}}\r\n{\"t\":{\"$date\":\"2023-06-07T11:54:12.205+00:00\"},\"s\":\"I\", \"c\":\"STORAGE\", \"id\":22430, \"ctx\":\"initandlisten\",\"msg\":\"WiredTiger message\",\"attr\":{\"message\":\"[1686138849:906448][1:0x7f75326efcc0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global recovery timestamp: (0, 0)\"}}\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior.\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Runtime Environment**\r\nSytem/Environemnt information (e.g Output of docker -v and uname -a)\r\n\r\n", + "summary": "**Summarize the MongoDB Container Error Issue**\n\n1. **Identify the issue**: Address the error occurring when starting the MongoDB container using Docker. The command run is:\n ```\n sudo VERSION=develop docker-compose -f docker-compose.yml --compatibility up -d\n ```\n The MongoDB container shows an error while others run without issues.\n\n2. **Examine logs**: Review the MongoDB container logs using:\n ```\n docker logs mongodb\n ```\n Key warnings include:\n - MongoDB 5.0+ requires CPU with AVX support, which your system lacks. 
Reference: [JIRA SERVER-54407](https://jira.mongodb.org/browse/SERVER-54407).\n - Available memory is significantly lower than the system memory.\n - The configured WiredTiger cache size exceeds 80% of available RAM.\n\n3. **Possible causes**:\n - Lack of AVX support in the CPU.\n - Insufficient available memory for MongoDB operations.\n - Unclean shutdown detected, indicated by a non-empty lock file (`/data/db/mongod.lock`).\n\n4. **Proposed first steps**:\n - Confirm CPU capabilities and consider using a machine with AVX support if necessary.\n - Free up or allocate more RAM to the container to ensure it has adequate resources.\n - If the lock file indicates an improper shutdown, remove the lock file:\n ```\n rm /data/db/mongod.lock\n ```\n - Adjust the WiredTiger cache size in the MongoDB configuration to use less RAM or increase the available RAM in the system.\n\n5. **Monitor results**: Restart the MongoDB container after making the necessary adjustments and check the logs again for any persistent issues or errors. \n\nBy following these steps, you should be able to troubleshoot the MongoDB container error effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/206", @@ -7547,6 +7811,7 @@ "node_id": "I_kwDOFAM8Ps54h0Dn", "title": "crAPI main page not loading on localhost", "body": "**Describe the bug**\r\nEvery time I curl from Linux the file with the given command it gets to my folder empty + when I manually filled the file with the set up and pull the main page is not loading on my localhost\r\n\r\n**To Reproduce**\r\n![image](https://github.com/OWASP/crAPI/assets/109968877/7f4a13b9-6940-4f05-84c6-8d2470280c6c)\r\n![image](https://github.com/OWASP/crAPI/assets/109968877/1f78f3ab-048e-4a88-b20c-1dd1f291001e)\r\n\r\n\r\n**Expected behavior**\r\ndocker-compose.yml should contain the set up. I tried manually editing the file and it runs but when in the login the loader never ends as I showed on the pictures above\r\n\r\n", + "summary": "Investigate the issue with the crAPI main page not loading on localhost. Confirm that the `docker-compose.yml` file is correctly configured with the necessary setup. \n\n1. Check the output of the `curl` command; ensure that the expected files are being downloaded correctly and are not empty.\n2. Verify the contents of the `docker-compose.yml` file. Ensure all required services are defined and correctly configured.\n3. If manually editing the `docker-compose.yml` resolves some issues but leads to an infinite loading screen during login, consider checking the logs of the running services for any errors. Use `docker-compose logs` to fetch the logs.\n4. Investigate network configurations and firewall settings that may be blocking access to localhost.\n5. Ensure all dependencies are correctly installed and running. 
Run `docker-compose up` to start the services and monitor for any errors during startup.\n\nBegin troubleshooting with these steps to pinpoint the root cause of the loading issue.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/221", @@ -7575,6 +7840,7 @@ "node_id": "I_kwDOFAM8Ps54-xJF", "title": " required but undefined property 'converstion_params' in openapi-spec.json", "body": "In the openapi-spec.json file, the definition for 'ProfileVideo' at line 3012 appears to have a spelling mistake for one of the required properties.\r\n\r\nconvers**t**ion_params should be conversion_params (additional 't' character is likely an error).\r\n\r\n\"crAPI_requiredParam_misSpelling\"\r\n", + "summary": "Fix the spelling mistake in the 'openapi-spec.json' file regarding the 'ProfileVideo' definition at line 3012. Change the required property from 'converstion_params' to 'conversion_params' to eliminate the undefined property error. \n\n1. Open the 'openapi-spec.json' file.\n2. Navigate to line 3012.\n3. Locate the entry for 'converstion_params'.\n4. Correct the spelling to 'conversion_params'.\n5. Save the changes and validate the JSON structure to ensure there are no syntax errors.\n6. Test the API to confirm that the required property is now recognized correctly.\n\nEnsure to run any existing tests or linting processes to verify that the change does not introduce new issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/222", @@ -7603,6 +7869,7 @@ "node_id": "I_kwDOFAM8Ps5-2ehZ", "title": "Need help with challenge 13", "body": "Hi,\r\nI'm having issues solving challenge 13. Is this an appropriate place to ask for help?\r\nEdw.", + "summary": "Request assistance for Challenge 13. Clearly outline the specific problems you are facing, including any error messages or unexpected behavior. Share relevant code snippets or configurations that relate to the challenge. \n\nTo tackle the problem, first, review the challenge requirements and constraints. Then, isolate the section of code that is causing issues. Utilize debugging techniques such as adding print statements or using a debugger to trace the execution flow. Additionally, consult the documentation or resources related to the technology you're using for this challenge. Engage with the community by posting your findings and questions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/234", @@ -7629,6 +7896,7 @@ "node_id": "I_kwDOFAM8Ps5-5l6w", "title": "PostgreSQL database not accepting edits", "body": "Hi,\r\nIn Challenge 13, I have found the coupon_code parameter in the /workshop/api/shop/apply_coupon to be injectable.\r\nI also found the applied_coupon table in the PostgreSQL database.\r\n\r\nThe endpoint accepts the following injection and returns the database version:\r\n\"coupon_code\":\"TRAC075'; SELECT version() --+\"\r\n\r\nBut it refuses the following and returns a 500 error:\r\n\"coupon_code\":\"TRAC075'; DELETE FROM applied_coupon WHERE coupon_code=TRAC075 --+\"\r\n\r\nIs there anything that needs to be changed in the crAPI config file to allow user edits to be made to the database? I noticed there are restrictions for shell injection.\r\n\r\nThanks,\r\nEdw.\r\n", + "summary": "Investigate the PostgreSQL database's handling of SQL injection and user input sanitization. \n\n1. Confirm the vulnerability in the `/workshop/api/shop/apply_coupon` endpoint by testing the injectable `coupon_code` parameter. \n2. 
Attempt to execute basic SQL commands through the parameter to understand the extent of the injection vulnerability.\n3. Analyze the server's response to various SQL commands, noting the 500 error for delete operations.\n4. Review the crAPI configuration file for any settings that may restrict database write operations or user edits.\n5. Determine if there are security measures like query whitelisting or input validation that could be causing the issue.\n\nConsider modifying the configuration to allow write operations if appropriate, while ensuring that security is maintained.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/235", @@ -7655,6 +7923,7 @@ "node_id": "I_kwDOFAM8Ps6E6ERf", "title": "Getting Core Dumped Error!!", "body": "**Describe the bug**\r\nWhen attempting to run the crAPI Docker container (`crapi/crapi-web:latest`) on my Ubuntu x86_64 system with Docker version 20.10.13, the container fails to start and exits with an \"Illegal instruction (core dumped)\" error.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Pull the crAPI Docker image:\r\n ```bash\r\n docker pull crapi/crapi-web:latest\r\n ```\r\n2. Run the Docker container:\r\n ```bash\r\n docker run --name crapi-web crapi/crapi-web:latest\r\n ```\r\n3. Observe the container status and inspect Docker logs:\r\n ```bash\r\n docker logs crapi-web\r\n ```\r\n4. Output\r\n```bash\r\ntotal 16\r\ndrwxr-xr-x 2 root root 4096 Feb 4 15:58 .\r\ndrwxr-xr-x 1 root root 4096 Feb 4 16:05 ..\r\n-rw-r--r-- 1 root root 2240 Feb 4 15:58 server.crt\r\n-rw-r--r-- 1 root root 3272 Feb 4 15:58 server.key\r\nWORKSHOP_SERVICE=crapi-workshop:8000\r\nHOSTNAME=6685c8e25d64\r\nSHLVL=2\r\nHOME=/root\r\nHTTP_PROTOCOL=http\r\nPATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin\r\nCOMMUNITY_SERVICE=crapi-community:8087\r\nTLS_ENABLED=false\r\nIDENTITY_SERVICE=crapi-identity:8080\r\nPWD=/\r\nNGINX_TEMPLATE=/etc/nginx/conf.d/default.conf.template\r\nIllegal instruction (core dumped)\r\n```\r\n\r\n**Expected behavior**\r\nThe crAPI Docker container (`crapi/crapi-web:latest`) should start successfully and be ready to serve requests on the specified ports (e.g., port 80 and port 443).\r\n\r\n**Runtime Environment**\r\n- Docker Version:\r\n ```bash\r\n docker -v\r\n ```\r\n Output:\r\n ```\r\n Docker version 20.10.25+dfsg1, build b82b9f3\r\n ```\r\n- Operating System:\r\n ```bash\r\n uname -a\r\n ```\r\n Output:\r\n ```\r\n Linux kali 6.6.9-amd64 #1 SMP PREEMPT_DYNAMIC Kali 6.6.9-1kali1 (2024-01-08) x86_64 GNU/Linux\r\n ```\r\n\r\n**Additional Information**\r\n- No custom configurations or changes were applied to the Docker environment.\r\n- Restarting Docker service and rebooting the host machine did not resolve the issue.\r\n \r\nPlease let me know if further information or logs are needed to diagnose this issue. Thank you!", + "summary": "Diagnose and resolve the \"Illegal instruction (core dumped)\" error when running the crAPI Docker container (`crapi/crapi-web:latest`) on Ubuntu x86_64.\n\n1. **Verify Compatibility**: Ensure that the Docker image is compatible with your CPU architecture. Use `docker inspect` to check the architecture of the `crapi/crapi-web:latest` image.\n\n2. **Check System Requirements**: Ensure that your system meets all the prerequisites for running crAPI, including any necessary libraries and dependencies.\n\n3. 
**Inspect Docker Logs**: Use `docker logs crapi-web` to gather more detailed error output. Look for any specific commands or instructions that might be causing the illegal instruction.\n\n4. **Test with Different Docker Versions**: Try running the container on a different Docker version or upgrade to the latest version. Sometimes, compatibility issues arise with specific versions.\n\n5. **Run with Debugging Options**: Start the container with debugging options enabled to gain more insight into the crash:\n ```bash\n docker run --name crapi-web --entrypoint /bin/bash crapi/crapi-web:latest\n ```\n\n6. **Check for Hardware Issues**: Run a hardware diagnostic to ensure the CPU is functioning correctly. Sometimes, core dumps can be a result of hardware failures.\n\n7. **Examine CPU Features**: Use `cat /proc/cpuinfo` to check for CPU features. If the Docker image is built with specific CPU optimizations (like AVX or SSE), ensure your CPU supports those features.\n\n8. **Contact Support**: If the issue persists, provide detailed logs and the results of the above checks to the crAPI support team or file a new issue on GitHub for further assistance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/241", @@ -7685,6 +7954,7 @@ "node_id": "I_kwDOFAM8Ps6Hlq3D", "title": " Install crAPI in userland Kali Linux", "body": "Hello,\r\nI am trying to install crAPI at userland Kali Linux.\r\nThe commands are not working.\r\nMay you could give me advise?\r\n\r\nThank you and Kind regards\r\n", + "summary": "Install crAPI on userland Kali Linux. Troubleshoot the installation commands that are failing. \n\n1. Verify your current environment and ensure you have the necessary dependencies installed by running:\n ```bash\n sudo apt update && sudo apt install git python3 python3-pip\n ```\n\n2. Clone the crAPI repository:\n ```bash\n git clone https://github.com/your-repo/crAPI.git\n ```\n\n3. Navigate to the crAPI directory:\n ```bash\n cd crAPI\n ```\n\n4. Install the required Python packages:\n ```bash\n pip3 install -r requirements.txt\n ```\n\n5. Check for any specific installation instructions in the README file of the crAPI repository.\n\n6. If errors persist, capture the error messages and search for solutions or report the issue with detailed logs for further assistance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/246", @@ -7713,6 +7983,7 @@ "node_id": "I_kwDOFAM8Ps6LlDo9", "title": "Security missconfiguration vulnerability ", "body": "Want to ask that is the crAPI have security missconfiguration vulnerability ", + "summary": "Investigate potential security misconfiguration vulnerabilities in crAPI. \n\n1. Review the crAPI configuration files for default settings that may expose sensitive information.\n2. Examine access controls and permissions to ensure they are properly set for users and services.\n3. Check for unnecessary services or ports that are open, which could be exploited.\n4. Conduct a security assessment to identify any outdated libraries or frameworks that may introduce vulnerabilities.\n5. 
Implement security best practices such as enforcing HTTPS, validating input, and sanitizing output.\n\nDocument findings and suggest necessary configurations to mitigate identified risks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/255", @@ -7741,6 +8012,7 @@ "node_id": "I_kwDOFAM8Ps6PjImU", "title": "Local host:8888 is unreachable", "body": "I can't reach localhost:8888 but I can reach localhost:8025. How can I fix this issue? \r\n![crapi](https://github.com/user-attachments/assets/c16e46e6-438a-4bc3-a309-f520bdafbb54)\r\n", + "summary": "Troubleshoot the issue of localhost:8888 being unreachable while localhost:8025 is accessible. \n\n1. Confirm that the application intended to run on port 8888 is actively running. Check the service status to ensure it is not stopped or crashed.\n\n2. Verify that the port 8888 is not blocked by a firewall or security settings. Use tools like `netstat` or `lsof` to check if the port is in use and if the application is listening on that port.\n\n3. Check the application's configuration files to ensure it is set to bind to the correct address and port. Look for settings related to host and port in the configuration.\n\n4. Investigate any relevant logs from the application for error messages or warnings that could provide insights into why it isn't accessible.\n\n5. If applicable, restart the application to see if the issue resolves itself.\n\n6. Ensure that no other services are conflicting with port 8888. If necessary, change the port in the application configuration to another unused port and test again.\n\nBy following these steps, you can identify and resolve the issue preventing access to localhost:8888.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/260", @@ -7765,10 +8037,11 @@ "pk": 273, "fields": { "nest_created_at": "2024-09-11T19:37:06.496Z", - "nest_updated_at": "2024-09-12T00:14:35.543Z", + "nest_updated_at": "2024-09-13T17:01:07.878Z", "node_id": "I_kwDOFAM8Ps6VvOKo", "title": "docker in running on VM not accesibe Using Host Machine", "body": "**If You're running this docker container on your Ubuntu VM and not able to access using Host machine using ip-address (Ex. 192.168.10.6)**\r\n\r\n- [x] #Do the Following\r\n - First Stop the container using (CTRL+C)\r\n - Open docker-compose.yml and modify the two things \r\n - `crapi-web` service: Under `crapi-web` modify the `ports` sections\r\n - \"0.0.0.0:8888:80\"\r\n - \"0.0.0.0:8888:80\"\r\n - `mailhog` service: modify the `ports` sections\r\n - \"0.0.0.0:8025:8025\"\r\n \r\n **If you have any doubt, referrer the image:**\r\n \r\n \r\n![image](https://github.com/user-attachments/assets/58075994-4ae5-461f-96a1-ffdc4f2981bd)\r\n\r\n![image](https://github.com/user-attachments/assets/128c382c-2644-4c70-aa4f-39ab11f8decc)\r\n", + "summary": "Modify Docker configuration for accessibility from host machine. Follow these steps:\n\n1. Stop the running Docker container using `CTRL+C`.\n2. Open `docker-compose.yml`.\n3. Update the `crapi-web` service:\n - Change the `ports` section to `\"0.0.0.0:8888:80\"` for both entries.\n4. Update the `mailhog` service:\n - Change the `ports` section to `\"0.0.0.0:8025:8025\"`.\n\nEnsure that the changes are saved. After modifying the configuration, restart the Docker container to apply the changes. Verify accessibility from the host machine using the specified IP address (e.g., 192.168.10.6). 
If issues persist, consult the provided images for additional guidance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/crAPI/issues/274", @@ -7797,6 +8070,7 @@ "node_id": "I_kwDOEyybVc57w796", "title": "Add German translations", "body": "Hi, if you like I could create Norwegian and German translations since I am proficient in those two languages. I could also look over the Spanish translation and see if I can improve the language a bit. My wife is a Spanish language teacher so I am sure I can get some support there. \r\n\r\n", + "summary": "The issue suggests adding German and Norwegian translations, with the author expressing their proficiency in these languages. Additionally, they offer to review the existing Spanish translation for improvements, leveraging support from a Spanish teacher. It may be beneficial to accept the offer and coordinate the translation efforts accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/cornucopia/issues/281", @@ -7823,6 +8097,7 @@ "node_id": "I_kwDOEyybVc58Ovm_", "title": "Add new version of the Online wiki deck that contains the new ASVS 4.0 mapping", "body": "The old wiki deck contains the 3.0 ASVS requirements. see: https://wiki.owasp.org/index.php/Cornucopia_-_Ecommerce_Website_-_VE_2\r\n\r\nThe wiki deck should be updated to account for the 4.0 ASVS requirements.\r\n", + "summary": "The issue requests an update to the Online wiki deck to reflect the new ASVS 4.0 mapping, as the current version includes the outdated ASVS 3.0 requirements. To address this, the wiki deck needs to be revised to incorporate the changes and requirements from ASVS 4.0.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/cornucopia/issues/293", @@ -7845,10 +8120,11 @@ "pk": 276, "fields": { "nest_created_at": "2024-09-11T19:38:23.850Z", - "nest_updated_at": "2024-09-12T03:11:16.828Z", + "nest_updated_at": "2024-09-13T16:54:06.205Z", "node_id": "I_kwDOEyybVc6Jubpf", "title": "🧚🤖 Pixeebot Activity Dashboard", "body": "\"DashList\"\n👋 This dashboard summarizes my activity on the repository, including available improvement opportunities.\n\n\n## Recommendations\n_Last analysis: Sep 11 | Next scheduled analysis: Sep 18_\n\n### Open\n\n ✅ Nice work, you're all caught up!\n\n\n### Available\n\n ✅ Nothing yet, but I'm continuing to monitor your PRs.\n\n\n### Completed\n✅ You merged improvements I recommended [View](https://github.com/OWASP/cornucopia/pulls?q=is%3Apr+is%3Amerged+author%3Aapp%2Fpixeebot)\n✅ I also hardened PRs for you [View](https://github.com/OWASP/cornucopia/pulls?q=%22Hardening+Suggestion%22+in%3Atitle+is%3Apr+author%3Aapp%2Fpixeebot+is%3Amerged+head%3Apixeebot%2F)\n\n## Metrics\n**What would you like to see here?** [Let us know!](https://tally.so/r/mYa4Y5)\n\n## Resources\n\n📚 **Quick links**\nPixee Docs | Codemodder by Pixee\n\n🧰 **Tools I work with**\n[SonarCloud](https://docs.pixee.ai/code-scanning-tools/sonar) | [SonarQube](https://docs.pixee.ai/code-scanning-tools/sonarqube) | [CodeQL](https://docs.pixee.ai/code-scanning-tools/codeql) | [Semgrep](https://docs.pixee.ai/code-scanning-tools/semgrep)\n\n🚀 **Pixee CLI**\nThe power of my codemods in your local development environment. 
[Learn more](https://github.com/pixee/pixee-cli)\n\n💬 **Reach out**\nFeedback | Support\n\n---\n\n❤️ Follow, share, and engage with Pixee: GitHub | [LinkedIn](https://www.linkedin.com/company/pixee/) | [Slack](https://pixee-community.slack.com/signup#/domain-signup)\n\n\n ![](https://d1zaessa2hpsmj.cloudfront.net/pixel/v1/track?writeKey=2PI43jNm7atYvAuK7rJUz3Kcd6A&event=ACTIVITY_DASHBOARD%7COWASP%2Fcornucopia%7CACTIVITY_DASHBOARD)\n\n", + "summary": "The Pixeebot Activity Dashboard provides a summary of repository activity, including current status and recommendations for improvement. As of the last analysis, all tasks are up to date, with no pending improvements. Recently merged improvements and hardened pull requests have been noted. The dashboard invites feedback on the metrics displayed and encourages users to share suggestions for additional features. Users are also provided with quick links to relevant resources and tools, as well as contact options for support and feedback. To enhance the dashboard, consider providing input on desired metrics.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/cornucopia/issues/549", @@ -7875,6 +8151,7 @@ "node_id": "I_kwDOEyybVc6K2tWJ", "title": "Ensure the converter can create print-ready proofs for print-on-demand jobs", "body": "**Is your feature request related to a problem? Please describe.**\r\n\r\nIn order to drive the project further and create print ready proofs for print-on-demand we need to be able to script the idml to pdf convertion.\r\n\r\n**Describe the solution you'd like**\r\nA clear and concise description of what you want to happen.\r\n\r\nScribus looks promising: https://wiki.scribus.net/canvas/Command_line_scripts#Usefull_'Create_PDF_out_of_existing_scribus_document'_script\r\n\r\nPerhaps we could create a github action for doing the convertion or a python module.\r\n\r\n**Describe alternatives you've considered**\r\n\r\nIndesign server is an alternative, but it’s a commercial product.\r\n\r\n**Additional context**\r\n\r\nCurrently we are not delivering print ready design files. A designer always have install the fonts, open the idml document, clone the back of the card 79 times to the correct place and export to pdf.\r\nInstead we should just be able to deliver final pdfs and not idml with embedded art works.\r\n\r\nAs a middle step we could look into correctly add the links to the graphics from python, but I am afraid it won’t be platform dependent.\r\n", + "summary": "The issue highlights the need for a solution to create print-ready proofs for print-on-demand jobs by automating the IDML to PDF conversion process. The suggested approach involves using Scribus for scripting the conversion or possibly creating a GitHub action or Python module to streamline this process. Currently, the workflow requires manual intervention by designers to export the files, which is inefficient. Alternatives like Indesign Server are noted but are not preferred due to their commercial nature. The next steps should focus on implementing an automated solution that can handle the conversion effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/cornucopia/issues/583", @@ -7901,6 +8178,7 @@ "node_id": "I_kwDOEyybVc6LGIZM", "title": "Build the requirement list on the card using OpenCRE for easier maintenance and collaboration. ", "body": "**Is your feature request related to a problem? Please describe.**\r\n\r\nCurrently the requirement list connected with each card has a tendency to become outdated. 
There is also too little discussions around them which means that it's not sure all of them are equally relevant for all cards. \r\n\r\n**Describe the solution you'd like**\r\n\r\nWe should be able to build the requirement map on the cards by querying the OpenCRE API. This will invite to more cross-team collaboration and maintenance of the requirements connected to the cards and make the map easier to maintain. \r\n\r\n**Describe alternatives you've considered**\r\n\r\nThe requirement list has been moved out of the word and indesign files, this has helped, but using OpenCRE is the next logical step.\r\n\r\n", + "summary": "The issue highlights the problem of outdated requirement lists associated with cards, which lack sufficient discussion and relevance. The proposed solution is to utilize the OpenCRE API to build a requirement map for these cards, promoting better collaboration and easier maintenance. An alternative considered was moving the requirement list out of word and design files, which has already shown some improvement. \n\nNext steps could involve integrating the OpenCRE API to enhance the requirement mapping process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/cornucopia/issues/595", @@ -7927,6 +8205,7 @@ "node_id": "I_kwDOEyybVc6LGK9o", "title": "Language review of the translations.", "body": "**Is your feature request related to a problem? Please describe.**\r\n\r\nWe now have the decks translated into 6 languages, but we have not done a proper review of the language. The English, Norwegian and Dutch versions are probably fine, but what about the rest? \r\n\r\n**Describe the solution you'd like**\r\n\r\nGet someone that has been working as a native language teacher or translator to have a look at the translations in order to make sure the translations are properly done.\r\n", + "summary": "The issue highlights the need for a comprehensive review of translations for decks available in six languages. While the English, Norwegian, and Dutch versions are assumed to be acceptable, there is uncertainty regarding the quality of the other translations. The proposed solution involves engaging a native language teacher or translator to assess and ensure the accuracy and appropriateness of all translations. It may be beneficial to identify and contact qualified individuals for this review process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/cornucopia/issues/596", @@ -7953,6 +8232,7 @@ "node_id": "I_kwDOEdhCDs47aYXL", "title": "OWASP threat modeling references", "body": "Add references to other OWASP threat modeling projects and resources", + "summary": "The issue suggests adding references to various OWASP threat modeling projects and resources to enhance the existing documentation. To address this, it would be beneficial to gather a list of relevant OWASP resources and incorporate them into the documentation for better accessibility and guidance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-threat-modeling-playbook/issues/3", @@ -7979,6 +8259,7 @@ "node_id": "I_kwDOEdhCDs47aYeL", "title": "Include a list of threat modeling tools", "body": "As part of the tooling step", + "summary": "The issue suggests that a list of threat modeling tools should be included in the documentation, specifically within the tooling section. 
To address this, the team should compile and add relevant tools to enhance the resource's comprehensiveness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-threat-modeling-playbook/issues/4", @@ -8001,10 +8282,11 @@ "pk": 282, "fields": { "nest_created_at": "2024-09-11T19:39:37.166Z", - "nest_updated_at": "2024-09-11T19:39:37.166Z", + "nest_updated_at": "2024-09-13T15:14:15.785Z", "node_id": "I_kwDOEdhCDs47aYqV", "title": "Include a list of threat modeling methodologies", "body": "As part of the selecting a methodology step", + "summary": "The issue requests the inclusion of a list of threat modeling methodologies within the documentation, specifically during the selection process of a methodology. It suggests enhancing the guidance provided to users when they are choosing a suitable threat modeling approach. To address this, a compilation of existing threat modeling methodologies should be created and added to the relevant section of the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-threat-modeling-playbook/issues/5", @@ -8031,6 +8313,7 @@ "node_id": "I_kwDOEcPlPM5hiKgz", "title": "💻✅ Incorporate Good Practices back into the handbook", "body": "https://owasp.org/www-committee-project/#div-practice", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/21", @@ -8053,10 +8336,11 @@ "pk": 284, "fields": { "nest_created_at": "2024-09-11T19:39:46.047Z", - "nest_updated_at": "2024-09-11T19:39:46.047Z", + "nest_updated_at": "2024-09-13T03:50:56.619Z", "node_id": "I_kwDOEcPlPM5hiLKX", "title": "📝👀 Update the .github/README.md for the OWASP Github page to better reflect projects and their statuses.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/22", @@ -8085,6 +8369,7 @@ "node_id": "I_kwDOEcPlPM5hjFys", "title": "📋🌟 create a list or projects or update the existing one to show last release, commit #, last commit, stars,", "body": "### Discussed in https://github.com/OWASP/www-committee-project/discussions/44\r\n\r\n
\r\n\r\nOriginally posted by **DonnieBLT** March 22, 2023\r\nsee this as a starting point https://github.com/psiinon/owasp-projects \r\n\r\nwe can create a github action that runs daily to update the list\r\nmake sure it shows - \r\nprimary language, license, Forks, stars, Issues, PRs, issues need help last updated, Release version, date released \r\nfigure out a way to show project statuses from each github repo to link roadmap issues similar to how the github list of repos has the stats
", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/48", @@ -8111,6 +8396,7 @@ "node_id": "I_kwDOEcPlPM5jQpN5", "title": "HANDBOOK: Discuss/document legal review for project donations", "body": "Nothing about legal review for project donations. There should be. The OWASP Foundation should provide this service if they don't already. The foundation will be unable to protect project leaders without the necessary due diligence, which is one of the benefits of joining a foundation in the first place. This needs to be addressed.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/58", @@ -8141,6 +8427,7 @@ "node_id": "I_kwDOEcPlPM5jQpdQ", "title": "HANDBOOK: Document copyright transfer", "body": "No mention of copyright transfer. We should discuss this in a future committee meeting, decide on a path forward and document the requirements in the handbook.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/59", @@ -8171,6 +8458,7 @@ "node_id": "I_kwDOEcPlPM5jQp5i", "title": "HANDBOOK: Prescribe OSS licenses", "body": "Some OSI licenses are anti-business or may lead to unintended legal requirements to disclose IP based on a deployments configuration. We should be prescriptive in the licenses we will accept.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/60", @@ -8201,6 +8489,7 @@ "node_id": "I_kwDOEcPlPM5jQqB6", "title": "HANDBOOK: Require CODEOWNERS and SECURITY.md", "body": "We should require a CODEOWNERS file and SECURITY.md in every project repo.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/61", @@ -8231,6 +8520,7 @@ "node_id": "I_kwDOEcPlPM5jQqVK", "title": "HANDBOOK: JIRA onboarding?", "body": "The new project request form takes you to JIRA, which requires an account. Future leaders will use their personal or work email address. We should encourage them to use their OWASP email address, but they won't have one yet. Therefore, I don't think JIRA is the right intake process. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/62", @@ -8261,6 +8551,7 @@ "node_id": "I_kwDOEcPlPM5jQqiQ", "title": "HANDBOOK: Clarify project types", "body": "We need to clarify project types. CycloneDX is the only \"Standard\" project that I'm aware of. Other \"standards\" like ASVS are categorized as documentation projects. We need to discuss this. CycloneDX does not fit into any existing project types. But the creation of a \"standard\" project type may be problematic with ASVS and others that are a different type of standard.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/63", @@ -8291,6 +8582,7 @@ "node_id": "I_kwDOEcPlPM5k_oQ6", "title": "Remove other \"project handbooks\" ", "body": "there are a few places where the handbook is mentioned see below: - would be good to consolidate them all into one official repo or file and remove all copies of the previous handbook and forward them to the new one\r\n\r\n1. https://github.com/OWASP/www-policy/blob/master/unpublished/project-handbook.md\r\n2. https://github.com/OWASP-Foundation/Project-Handbook\r\n3. https://owasp.org/www-policy/operational/projects\r\n4. 
https://owasp.org/www-pdf-archive/PROJECT_LEADER-HANDBOOK_2014.pdf\r\n5. https://owasp.org/www-pdf-archive//OWASPProjectsHandbook2013.pdf\r\n6. https://github.com/OWASP/www-committee-project/tree/master/handbook", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/68", @@ -8321,6 +8613,7 @@ "node_id": "I_kwDOEcPlPM5o2MvR", "title": "Setup a Github Action to monitor project pages for activity ", "body": "1) if a project has not changed it's project page in 1 week, send a reminder\r\n2) if a project has not had updates for 6 months send a reminder\r\n3) if a project has not had updates for 2 years, send an archive notice\r\n4) if a project has not had updates for 3 years, archive it", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-project/issues/71", @@ -8343,10 +8636,11 @@ "pk": 294, "fields": { "nest_created_at": "2024-09-11T19:40:14.558Z", - "nest_updated_at": "2024-09-11T19:40:14.558Z", + "nest_updated_at": "2024-09-13T15:14:34.494Z", "node_id": "I_kwDOEZvT985B4hvR", "title": "bug: acurl command bug", "body": "error msg:\r\n```\r\n~ ❯❯❯ acurl http://localhost:8000/DescribeIpReport\r\nTraceback (most recent call last):\r\n File \"gurl/__main__.py\", line 32, in \r\n main()\r\n File \"gurl/__main__.py\", line 20, in main\r\n res = gurl.parse_curl_trace(f.read())\r\n File \"/venv/lib/python3.8/site-packages/gurl/__init__.py\", line 104, in parse_curl_trace\r\n reqres[\"_meta\"][\"curl_log\"] = log\r\nTypeError: 'NoneType' object is not subscriptable\r\n~ ❯❯❯\r\n```", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/apicheck/issues/8", @@ -8369,10 +8663,11 @@ "pk": 295, "fields": { "nest_created_at": "2024-09-11T19:40:32.779Z", - "nest_updated_at": "2024-09-12T00:14:41.132Z", + "nest_updated_at": "2024-09-13T17:01:03.793Z", "node_id": "I_kwDOEWezOc5wUjF7", "title": "Is this initiative still active?", "body": "Haven't seen updates since 2021. I've been looking for uses of ontology to derive threats from descriptions of infrastructure. This one at least derives them from DFDs, but in Threat Dragon format. ", + "summary": "Verify the current status of the initiative by checking for recent commits or activity on the repository. Investigate the last updates made in 2021 to determine if there are any pending issues or discussions. Explore the possibility of integrating ontology for threat analysis in infrastructure descriptions. Consider analyzing the existing Threat Dragon format to understand how threats are derived from Data Flow Diagrams (DFDs). \n\nAs initial steps, review the documentation and codebase for insights into the methodology used for threat derivation. Engage with the community by posting inquiries or suggestions on the GitHub issue tracker. Propose enhancements or collaborations to reactivate the project and expand its capabilities.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OdTM/issues/1", @@ -8399,6 +8694,7 @@ "node_id": "I_kwDOETRnAc4-CaNB", "title": "Possible new ideas for challenges", "body": "This ticket is for creating/listing possible ideas. If an Idea is picked up by a developer, then it gets its own tickets. 
\r\n- [x] #44\r\n- [x] #43\r\n- [x] Google support (https://github.com/commjoen/wrongsecrets/issues/40, https://github.com/commjoen/wrongsecrets/issues/39), \r\n- [x] #93\r\n- [x] Alibaba cloud support (will not do this, maybe have a write up later?)\r\n- [x] #299\r\n- [x] Heroku support\r\n- [x] #144\r\n- [x] Secret in logs (= challenge 8)\r\n- [x] #187\r\n- [x] #188\r\n- [x] #189\r\n- [x] #199\r\n- [x] #200\r\n- [x] #201\r\n- [x] #148\r\n- [x] Hardcoded in testcode (https://github.com/commjoen/wrongsecrets/issues/37#issuecomment-1011482070)\r\n- [x] #296\r\n- [ ] #810\r\n- [x] #815\r\n- [ ] #297\r\n- [ ] #811\r\n- [ ] #812\r\n- [x] have a too long living OIDC token which can be used to extract and apply (wont'do)\r\n- [x] Jenkins or other github secondary ci/cd secret (won't do, as it requiers another container next to it & needs maintenance. Our current ci/cd action shows the issue already.\r\n- [ ] #345\r\n- [ ] Simple one that is a mix of 1 & 13: docker container is run with password as parameter, but the whole command is placed in a .sh file and stored in the git repo (aka: use .gitignore to block local helper scripts)\r\n- [x] #344\r\n- [x] SOPS/sealed secrets misconfig : a bogus sealed secret with misconfigured retrieval setup? (pending)\r\n- [ ] #615\r\n- [x] #614\r\n- [ ] #613\r\n- [x] #377\r\n- [x] #616 \r\n- [x] #809\r\n- [x] #813\r\n- [x] Based on @robvanderveer his suggestion: https://github.com/OWASP/wrongsecrets/issues/616\r\n- [ ] Have passwordless challenges based on impersonation such as https://github.com/OWASP/wrongsecrets/blob/master/src/main/resources/explanations/challenge11_hint-azure.adoc - agreed to not create a new challenge, but extend GCP/AWS with a similar solution for cahllenge 11.\r\n- [x] #814\r\n- [ ] Bad RSA private key redaction; https://www.hezmatt.org/~mpalmer/blog/2020/05/17/private-key-redaction-ur-doin-it-rong.html (tip from @nbaars )\r\n- [ ] A kotlin binary", + "summary": "Create a list of potential challenges for developers to tackle. Each selected idea should be expanded into its own ticket. Review the following ideas and select those you wish to pursue:\n\n1. Implement Google support issues referenced in tickets #40 and #39.\n2. Explore Alibaba cloud support, with a possible write-up later.\n3. Add Heroku support.\n4. Address the challenge related to secrets in logs (challenge 8).\n5. Investigate hardcoded credentials in test code as noted in issue #37.\n6. Develop a challenge featuring a long-lived OIDC token for extraction and application (decide not to implement).\n7. Suggest a challenge involving Docker containers run with passwords as parameters, stored in a .sh file within the repo (ensure to use .gitignore).\n8. Create challenges based on misconfigured SOPS/sealed secrets as discussed.\n9. Consider passwordless challenges involving impersonation, potentially extending existing GCP/AWS challenges.\n\nBegin by filtering through the ideas above, selecting a few that align with current priorities. Create new tickets for each selected idea, outlining specific acceptance criteria and technical requirements needed for implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/37", @@ -8427,6 +8723,7 @@ "node_id": "I_kwDOETRnAc5Ajjwq", "title": "Record howto's", "body": "Let's have a recording of the solutions!", + "summary": "Create a repository for recording how-to solutions. 
Implement a structured format for documenting each solution, including prerequisites, step-by-step instructions, and code snippets where applicable. Use Markdown for readability and organization.\n\n1. Set up a README file to outline the purpose of the repository and how to contribute.\n2. Choose a recording tool or software that allows you to capture screen activity and audio simultaneously.\n3. Establish a consistent naming convention for the recorded files to ensure easy navigation.\n4. Develop a template for submitting how-to recordings, including sections for problem description, solution steps, and any relevant links or references.\n5. Encourage contributors to test their solutions before recording to ensure accuracy.\n6. Create a directory structure in the repository for categorizing recordings by topic or technology.\n\nBegin by recording a few initial how-to solutions as examples to guide future contributions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/131", @@ -8457,6 +8754,7 @@ "node_id": "I_kwDOETRnAc5D9H6b", "title": "Improve ui of the overall app", "body": "After talking to @mikewoudenberg about the front-end of the application, we realized it can use some love!\r\nMike will help us to get into a better design together. This is what this ticket is all about!", + "summary": "Improve the UI of the overall application. Collaborate with @mikewoudenberg to enhance the front-end design. \n\nFirst steps to tackle the problem:\n1. Conduct a UI audit to identify areas needing improvement.\n2. Gather user feedback to understand pain points and preferences.\n3. Create wireframes and design prototypes for proposed changes.\n4. Implement changes incrementally, focusing on high-impact areas first.\n5. 
Test the new designs with users for feedback and iterate as needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/198", @@ -8488,6 +8786,7 @@ "node_id": "I_kwDOETRnAc5EAKfQ", "title": "create a secrets detection testbed branch with revoked credentials", "body": "Steps to take:\r\n\r\n- [x] List secrets present in this issue\r\n- [x] Create a link to this issue in the web interface https://github.com/commjoen/wrongsecrets/pull/250\r\n- [x] Implement keys below in a separate branch with a script to generate a container from it which can be scanned\r\n- [x] Add master merging script for the .github/scripts/docker-create-and-push.sh \r\n\r\nKeys that can be added:\r\n\r\n- [x] Azure\r\n- [x] AWS\r\n- [x] GCP\r\n- [x] Git credentials :SSH key\r\n- [x] Git credentials: developer token\r\n- [x] private key RSA key & private ECC key\r\n- [x] GPG keychain (armored and notarmored)\r\n- [x] AES keys\r\n- [x] Slack callback\r\n- [x] kubeconfig\r\n- [x] QR-code (will be hard to represent a secret which is detactable other than through entropy,skipping it)\r\n- [x] BasicAuth\r\n- [x] gradle credentials\r\n- [x] mvn credentials\r\n- [x] NPM\r\n- [x] Firebase push notification keys (android/ios)\r\n- [x] OTP Seed\r\n- [x] segment.io access keys\r\n- [x] Vault root token & unseal keys\r\n- [x] Any JWT Token\r\n- [x] Gitlab PAT\r\n- [x] Gpg armoured export private and public keys\r\n- [x] Azure devops access token\r\n- [x] onepassword emergency kit, 1password-credentials.json and accesstokens\r\n- [x] keybase paperkey\r\n- [ ] IBM-cloud?\r\n- [ ] Nomad credentials (wait till https://github.com/commjoen/wrongsecrets/issues/299 happens)\r\n- [ ] Spring boot Session token\r\n- [x] Slack access tokens:bottoken & usertoken, applevel token & config token (requested..pending)\r\n- [ ] Braze API-keys (requested)\r\n- [ ] Lastpass integration/api-key\r\n- [ ] Confidant key\r\n- [x] Docker hub access token\r\n- [x] Vagrant cloud access token\r\n- [ ] confluence/jira secrets\r\n- [ ] AWS instance profile\r\n- [ ] add dockerconfig (https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) \r\n- [ ] secrets above with encodings (base64, Hex encoding)\r\n- [ ] Database connection strings\r\n- [ ] OIDC token\r\n\r\nwhich other secret would you like to add? please comment\r\n", + "summary": "Create a branch for secrets detection testbed with revoked credentials. Follow these steps:\n\n1. **List Secrets**: Compile a comprehensive list of secrets to be included, such as:\n - Azure, AWS, GCP credentials\n - Git credentials (SSH key, developer token)\n - Private keys (RSA, ECC)\n - GPG keychain (both armored and not armored)\n - AES keys, Slack callback, kubeconfig, BasicAuth\n - Gradle, Maven, NPM credentials\n - Firebase push notification keys, OTP Seed\n - Segment.io access keys, Vault root token & unseal keys\n - Any JWT token, GitLab PAT, Azure DevOps access token\n - 1Password emergency kit, Keybase paperkey\n - Docker hub, Vagrant cloud access tokens\n - Additional secrets as suggested by comments (e.g., IBM Cloud, Nomad, Spring Boot session token, etc.).\n\n2. **Create Issue Link**: Reference the issue in the web interface by linking to `https://github.com/commjoen/wrongsecrets/pull/250`.\n\n3. **Implement Keys in a Branch**: Create a separate branch to implement the above keys. Develop a script that generates a container capable of scanning these secrets.\n\n4. 
**Add Merging Script**: Incorporate a master merging script within the `.github/scripts/docker-create-and-push.sh` file.\n\n5. **Consider Encodings**: Ensure to include secrets with various encodings such as Base64 and Hex encoding.\n\n6. **Database and OIDC Tokens**: Address the inclusion of database connection strings and OIDC tokens in the secrets list.\n\n7. **Seek Additional Input**: Encourage community input on additional secrets to incorporate by requesting comments.\n\nBegin by compiling the list of secrets and creating the new branch.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/201", @@ -8516,6 +8815,7 @@ "node_id": "I_kwDOETRnAc5Gp34J", "title": "Create optional low value keys which can be used as canarotykens in AWS/GCP", "body": "", + "summary": "Implement optional low-value keys for use as canary tokens in AWS and GCP. Define the key characteristics and requirements for these tokens, ensuring they can be easily integrated into existing infrastructure.\n\n1. Research existing canary token implementations in AWS and GCP to establish best practices.\n2. Design a schema for the low-value keys, specifying the format, expiration, and usage limits.\n3. Develop a mechanism for generating and managing these tokens, including a secure storage solution.\n4. Create documentation outlining the setup process and examples of how to use the tokens in various scenarios.\n5. Test the implementation in a staging environment to ensure functionality and security.\n6. Roll out the solution and gather feedback for potential improvements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/236", @@ -8540,10 +8840,11 @@ "pk": 301, "fields": { "nest_created_at": "2024-09-11T19:41:27.003Z", - "nest_updated_at": "2024-09-11T22:53:33.754Z", + "nest_updated_at": "2024-09-13T17:07:13.065Z", "node_id": "I_kwDOETRnAc5Jz1RE", "title": "Add hardcoded encryption key on top of a secret.", "body": "Have a challenge about \"bad encryption practices\" where we hardocde the key and the secret in java.\r\n\r\nSteps to take:\r\n\r\n- create a secret on your computer\r\n- encrypt it with your algorithm of choice (could be AES, 3DES, whatever you like) on your computer resulting in the ciphertext you need\r\n- now take the ciphertext and put that in the java code of the challenge\r\n- add the key you used to encrypt the challenge and a decryption method to the challenge. See contributing.md on how to create the challenge.", + "summary": "Implement a challenge showcasing \"bad encryption practices\" by hardcoding an encryption key alongside a secret in Java.\n\n1. **Create a Secret**: Generate a meaningful secret on your local machine.\n \n2. **Encrypt the Secret**: Use an encryption algorithm of your choice (e.g., AES, 3DES) to encrypt the secret, producing the ciphertext.\n\n3. **Integrate Ciphertext into Code**: Embed the generated ciphertext directly into the Java code for the challenge.\n\n4. **Add Hardcoded Key**: Include the encryption key used for the encryption process in the code.\n\n5. **Implement Decryption Method**: Write a method that can decrypt the ciphertext using the hardcoded key.\n\n6. 
**Refer to Contributing Guidelines**: Consult `contributing.md` for specific instructions on how to format and add the challenge to the repository.\n\nBy following these steps, create a clear example of poor encryption practices for educational purposes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/297", @@ -8576,6 +8877,7 @@ "node_id": "I_kwDOETRnAc5Ng0D5", "title": "Log secret encoded from your cloud app towards the logging solution of your cloudprovider.", "body": "Have a challenge for all 3 cloudproviders, where we log the actual secret inot the logs of the cloudprovider only.\r\n- [ ] log secret in AWS Cloudwatch\r\n- [ ] log secret in GCP stackdriver\r\n- [ ] log secret in Azure log Analytics", + "summary": "Log secrets encoded from your cloud application to the logging solutions of the three major cloud providers: AWS, GCP, and Azure. \n\n1. **AWS CloudWatch**: Implement logging functionality that captures the secret and sends it to CloudWatch logs. Use AWS SDK to configure logging settings, ensuring that the secret is encoded properly before logging. Verify the output in the CloudWatch console.\n\n2. **GCP Stackdriver**: Utilize Google Cloud logging libraries to encode the secret from your application and log it into Stackdriver. Ensure that the logging configuration is set up correctly to capture and store the logs. Check the logs in the GCP console for successful logging.\n\n3. **Azure Log Analytics**: Configure your Azure application to encode the secret and send it to Log Analytics. Use Azure Monitor SDK or REST APIs to facilitate the logging process. Validate the logs in the Azure portal to confirm that the secret is recorded.\n\n**First Steps**:\n- Set up logging libraries for each cloud provider in your application.\n- Ensure that proper encoding mechanisms are in place for the secrets before logging.\n- Test logging functionality in a development environment to confirm that logs are being captured as intended.\n- Review cloud provider documentation for specific logging API details and best practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/345", @@ -8605,6 +8907,7 @@ "node_id": "I_kwDOETRnAc5PLCdi", "title": "Create improved documentation", "body": "Have something like https://madhuakula.com/kubernetes-goat/ but then for wrognsecrets and reorganize the docs and wiki here.\r\n\r\n\r\n\r\n\r\nRaw notes: doxRS @madhuakula can start in same repo, lets see if we can include https://github.com/commjoen/wrongsecrets/tree/fix-container/src/main/resources/explanations automatically so we don't need to copy-paste. ", + "summary": "Improve documentation for the WrongSecrets project. \n\n1. Create a comprehensive resource similar to the existing Kubernetes Goat documentation found at https://madhuakula.com/kubernetes-goat/.\n2. Reorganize the current documentation and wiki to enhance clarity and usability.\n3. Utilize the existing explanations from the WrongSecrets GitHub repository (https://github.com/commjoen/wrongsecrets/tree/fix-container/src/main/resources/explanations) to avoid manual copy-pasting. \n4. 
Explore ways to automate the integration of these explanations into the new documentation structure.\n\nFirst steps to tackle the problem:\n- Assess the current state of documentation and identify key areas for improvement.\n- Review the content available in the WrongSecrets repository for potential inclusion.\n- Develop a structured outline for the new documentation layout.\n- Implement a strategy to automate the integration of explanations from the repository.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/368", @@ -8635,6 +8938,7 @@ "node_id": "I_kwDOETRnAc5Q4lhz", "title": "Find and fix memory leak", "body": "after running `hey` twice against `/challenge/1` we see that the amount of sessions build up to 400, and memory consumption goes from 200Mb to 277 MB after which, when all sessions are released, we still see 274 MB in use (e.g. 70 MB is not GCed?)", + "summary": "Investigate and resolve the memory leak issue observed when running the `hey` tool against the `/challenge/1` endpoint. After executing the test twice, track the session count, which escalates to 400, and monitor memory usage that increases from 200 MB to 277 MB. Upon releasing all sessions, confirm that 274 MB remains allocated, indicating that 70 MB has not been garbage collected.\n\nTake the following initial steps to tackle the problem:\n\n1. **Profile Memory Usage**: Use a memory profiler (e.g., VisualVM, Memory Analyzer) to identify objects that are not being collected by the garbage collector after sessions are released.\n\n2. **Review Session Management**: Examine the session handling logic in the application. Check for any references to session objects that may be retained unintentionally, preventing garbage collection.\n\n3. **Check for Strong References**: Look for strong references to session-related data that might be causing memory retention. Replace them with weak references if appropriate.\n\n4. **Analyze Code Paths**: Review the code paths executed during the two runs with `hey`. Pay special attention to any resources that are not being properly disposed of or released, such as database connections or file handles.\n\n5. **Implement Logging**: Add logging around session creation and destruction to monitor their lifecycle and track potential leaks.\n\n6. **Run Tests**: After implementing changes, run the tests again to verify if memory usage stabilizes and sessions are properly garbage collected.\n\n7. **Document Findings**: Record the findings, changes made, and their impact on memory consumption to assist with future debugging and maintenance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/389", @@ -8666,6 +8970,7 @@ "node_id": "I_kwDOETRnAc5Sbitv", "title": "Have a github action to compare git-secrets and trufflehog without any configuration update", "body": "Create a multistage pipelien in which we check the performance of \r\n- [ ] git-secrets (https://github.com/awslabs/git-secrets) \r\n- [ ] git-leaks (https://github.com/zricethezav/gitleaks) - https://github.com/gitleaks/gitleaks-action (only commercial? 
will have to ask for a license key...)\r\n - [ ] request license key\r\n - [ ] do scanning\r\n - [ ] capture result in count\r\n- [ ] trufflehog (https://github.com/trufflesecurity/trufflehog) - https://github.com/marketplace/actions/trufflehog-oss \r\n - [x] do scanning (see https://github.com/OWASP/wrongsecrets/actions/workflows/scanners.yml, will have to update it with more scanners and capturing)\r\n - [ ] capture result in count\r\n- [ ] trufflehog3 \r\n- [ ] Whispers (https://github.com/Skyscanner/whispers) - https://github.com/Skyscanner/whispers/issues/72 \r\n- [ ] Github secret scanning (https://docs.github.com/en/developers/overview/secret-scanning-partner-program) \r\n- [ ] gittyleaks (https://github.com/kootenpv/gittyleaks)\r\n- [ ] git-all-secretss (https://github.com/anshumanbh/git-all-secrets)\r\n- [ ] detect-secrets (https://github.com/Yelp/detect-secrets)\r\n- [ ] Java Sast tool?\r\n\r\nfor their detection out of the box.", + "summary": "Implement a GitHub Action to compare the effectiveness of various secret scanning tools without requiring configuration updates. Create a multistage pipeline that includes the following steps:\n\n1. **Assess Git-Secrets**:\n - Integrate and execute git-secrets to validate its performance.\n - Link: [git-secrets](https://github.com/awslabs/git-secrets).\n\n2. **Incorporate Git-Leaks**:\n - Request a license key if necessary for git-leaks.\n - Perform scanning with Git-Leaks.\n - Capture and log the results in terms of detected secrets.\n - Link: [git-leaks](https://github.com/zricethezav/gitleaks) and [gitleaks-action](https://github.com/gitleaks/gitleaks-action).\n\n3. **Utilize TruffleHog**:\n - Execute scanning using TruffleHog.\n - Update the existing workflow to include more scanners and ensure results are captured.\n - Link: [trufflehog](https://github.com/trufflesecurity/trufflehog) and [trufflehog-oss action](https://github.com/marketplace/actions/trufflehog-oss).\n\n4. **Evaluate TruffleHog3**:\n - Add TruffleHog3 to the pipeline for comparison.\n\n5. **Integrate Whispers**:\n - Include the Whispers tool into the pipeline.\n - Link: [Whispers](https://github.com/Skyscanner/whispers).\n\n6. **Add GitHub Secret Scanning**:\n - Integrate GitHub's built-in secret scanning capabilities.\n - Link: [GitHub Secret Scanning](https://docs.github.com/en/developers/overview/secret-scanning-partner-program).\n\n7. **Implement Additional Tools**:\n - Add gittyleaks, git-all-secrets, and detect-secrets to the analysis.\n - Links:\n - [gittyleaks](https://github.com/kootenpv/gittyleaks)\n - [git-all-secrets](https://github.com/anshumanbh/git-all-secrets)\n - [detect-secrets](https://github.com/Yelp/detect-secrets)\n\n8. 
**Consider Java SAST Tools**:\n - Explore Java static application security testing (SAST) tools for further analysis.\n\n**First Steps**:\n- Set up a new GitHub Action workflow file.\n- Define the pipeline stages and include each tool as a separate job.\n- Start with integrating git-secrets and trufflehog to establish a baseline.\n- Ensure each tool’s results are captured and logged effectively.\n- Gradually add more tools and functionalities to the pipeline based on initial performance metrics.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/424", @@ -8694,6 +8999,7 @@ "node_id": "I_kwDOETRnAc5SbjNS", "title": "Create a separate repo where you allow actions to run, so you can get the secret out", "body": "Create a separate repo where you allow actions to run, so you can get the secret out. The idea would be that you have a challenge with a forkable action+Secret, so anyone forking this repository should get access to the given secret.\r\nTODO:\r\n- [ ] Have the Github secret forkable\r\n- [ ] Create a github action using it\r\n- [ ] Create the actual challenge with the secret encrypted as part of the java code (See contributing.md on how to create the challenge)", + "summary": "Create a separate GitHub repository to enable actions to run and expose a secret. Ensure that the repository allows forking so that any user who forks the repo can access the designated secret. \n\n**Technical Steps:**\n1. **Configure GitHub Secrets:** Set up the repository to include a GitHub secret that is forkable.\n2. **Develop GitHub Action:** Create a GitHub Action that utilizes the forkable secret.\n3. **Design Challenge:** Implement the challenge by encrypting the secret within the Java code. Refer to `contributing.md` for detailed instructions on how to structure the challenge.\n\n**First Steps:**\n- Set up a new repository on GitHub.\n- Research GitHub's handling of secrets in forked repositories to ensure the secret is accessible.\n- Draft the initial version of the GitHub Action and test its functionality with a sample secret.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/425", @@ -8722,6 +9028,7 @@ "node_id": "I_kwDOETRnAc5aLa_f", "title": "Create challengecreatorcli", "body": "In order to speed up challengecreation we can best create a cli which creates the required class file and asciidoc. It only needs options like:\r\n- type (cloud/k8s/docker)\r\n- Title (Freeform)\r\nAnd then generate the required challenge file and the asciidoc ", + "summary": "Create a command-line interface (CLI) named `challengecreatorcli` to streamline challenge creation. The CLI should accept the following options:\n\n- **Type**: Specify the type of challenge (options include `cloud`, `k8s`, or `docker`).\n- **Title**: Provide a freeform title for the challenge.\n\nUpon execution, the CLI must perform the following actions:\n\n1. Generate the required class file based on the specified type.\n2. Create an AsciiDoc file that includes the challenge title and relevant content.\n\nTo tackle this problem:\n\n1. Set up a new CLI project using a suitable programming language (e.g., Python, Node.js).\n2. Implement argument parsing to handle the `type` and `title` options.\n3. Define templates for the class file and AsciiDoc to ensure consistent formatting.\n4. Write functions to generate the class file and AsciiDoc file based on the user inputs.\n5. 
Test the CLI with various inputs to ensure it creates files correctly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/555", @@ -8752,6 +9059,7 @@ "node_id": "I_kwDOETRnAc5eRsJe", "title": "Have a secret in .gitignore /.ssh", "body": "- [ ] Create a branch with added secret in .gitignore and a secret in .ssh\r\n- [ ] Create the challenges for both where the secrets for both files are having their own separate challenge with encryption of the secrets to hide them from detection engines.", + "summary": "1. Create a new branch dedicated to secrets management.\n2. Add a secret file to the `.gitignore` to prevent it from being tracked.\n3. Place an additional secret in the `.ssh` directory, ensuring it is also included in the `.gitignore`.\n4. Develop separate challenges for each secret. \n5. Implement encryption methods for the secrets to obscure them from detection engines. \n6. Investigate suitable encryption algorithms (e.g., AES, RSA) for securing the secrets.\n7. Test the implementation to confirm that the secrets are encrypted properly and remain undetected.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/613", @@ -8784,6 +9092,7 @@ "node_id": "I_kwDOETRnAc5eRsLw", "title": "Native languages round 2: SWift! ", "body": "This challenge is about finding hardcoded secrets in binaries in Swift! With this we want to explain to our users that no language or binary is safe to just put the secret in offline. For this you need to:\r\n\r\n- [ ] Add a swift binary to the [wrongsecrets/binaries ](https://github.com/OWASP/wrongsecrets-binaries) repowith crosscompiling for the various OSes\r\n- [ ] Add a challenge here that uses the binary (See contributing.md and the code of the other binary challenges).", + "summary": "Create a Swift binary to demonstrate the challenge of finding hardcoded secrets within binaries. Follow these steps:\n\n1. **Add Swift Binary**: Implement a Swift binary and place it in the [wrongsecrets/binaries](https://github.com/OWASP/wrongsecrets-binaries) repository. Ensure the binary supports cross-compilation for various operating systems.\n\n2. **Develop Challenge**: Create a corresponding challenge that utilizes the new binary. Refer to the guidelines in `contributing.md` and examine the code of existing binary challenges for structure and style.\n\n3. **Technical Details**:\n - Use Swift's features to hardcode secrets in the binary.\n - Ensure the binary can be built for macOS, Linux, and Windows.\n - Document the build process for each OS in the repository.\n\n4. 
**First Steps**:\n - Set up your Swift development environment.\n - Create a simple Swift application that includes hardcoded secrets.\n - Test cross-compilation for different operating systems.\n - Draft the challenge description and expected outcomes for users engaging with the binary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/615", @@ -8815,6 +9124,7 @@ "node_id": "I_kwDOETRnAc5f-HcJ", "title": "aad-pod-identity is no longer supported & K8S namespace does not enforce restricted policy in Azure", "body": "In order to complete the migration to Kubernetes 1.25 on AKS and enforce the `restricted` psa in the default namespace, we need to migrate from `aad-pod-identity` to https://azure.github.io/azure-workload-identity/docs/\r\n\r\nIssue is caused by #652 #646\r\n\r\nPlease note that: if you want to pick up this issue, you have to have experience with:\r\n- terraform\r\n- helm\r\n- Azure AKS\r\n- Azure workload identity", + "summary": "Migrate from `aad-pod-identity` to Azure Workload Identity for Kubernetes 1.25 on AKS. Enforce the `restricted` Pod Security Admission (PSA) policy in the default namespace. \n\nAddress the issues caused by #652 and #646. \n\nImplement the following first steps:\n1. Review the Azure Workload Identity documentation to understand the migration process.\n2. Assess the current setup of `aad-pod-identity` in your AKS environment.\n3. Plan the migration strategy, ensuring minimal downtime and compatibility with existing workloads.\n4. Update Terraform scripts to use Azure Workload Identity configurations.\n5. Modify Helm charts to integrate with the new identity mechanism.\n6. Test the migration in a staging environment before deploying to production. \n\nEnsure you have experience with Terraform, Helm, Azure AKS, and Azure Workload Identity before proceeding.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/670", @@ -8845,6 +9155,7 @@ "node_id": "I_kwDOETRnAc5g53rq", "title": "DAST scan - Investigate & fix if required results of the ZAP scan", "body": "\r\nA ZAP baseline scan demonstrates several issues. Investigate each of these issues and create a fix if required, or leave on ignore if not relevant. 
Full ZAP report can be found in GitHub Actions.\r\n\r\nIssues:\r\n- [ ] Information Disclosure - Suspicious Comments [10027]\r\n- [ ] User Controllable HTML Element Attribute (Potential XSS) [10031]\r\n- [ ] Non-Storable Content [10049]\r\n- [ ] Cookie without SameSite Attribute [10054]\r\n- [ ] CSP: Wildcard Directive [10055]\r\n- [ ] Permissions Policy Header Not Set [10063]\r\n- [ ] Modern Web Application [10109]\r\n- [ ] Dangerous JS Functions [10110]\r\n- [ ] Loosely Scoped Cookie [90033]\r\n\r\n### Please provide relevant logs\r\n\r\n```\r\nIGNORE: Information Disclosure - Suspicious Comments [10027] x 14 \r\n\thttp://localhost:8080/webjars/bootstrap/5.2.3/js/bootstrap.bundle.min.js (200 OK)\r\n\thttp://localhost:8080/webjars/datatables/1.13.2/js/dataTables.bootstrap5.min.js (200 OK)\r\n\thttp://localhost:8080/webjars/datatables/1.13.2/js/jquery.dataTables.min.js (200 OK)\r\n\thttp://localhost:8080/webjars/github-buttons/2.14.1/dist/buttons.js (200 OK)\r\n\thttp://localhost:8080/webjars/jquery/3.6.3/jquery.js (200 OK)\r\nIGNORE: User Controllable HTML Element Attribute (Potential XSS) [10031] x 6 \r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\n\thttp://localhost:8080/challenge/2 (200 OK)\r\n\thttp://localhost:8080/challenge/2 (200 OK)\r\nIGNORE: Non-Storable Content [10049] x 11 \r\n\thttp://localhost:8080 (200 OK)\r\n\thttp://localhost:8080/ (200 OK)\r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\n\thttp://localhost:8080/challenge/1 (200 OK)\r\n\thttp://localhost:8080/challenge/2 (200 OK)\r\nIGNORE: Cookie without SameSite Attribute [10054] x 1 \r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\nIGNORE: CSP: Wildcard Directive [10055] x 12 \r\n\thttp://localhost:8080 (200 OK)\r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\n\thttp://localhost:8080/robots.txt (404 Not Found)\r\n\thttp://localhost:8080/sitemap.xml (404 Not Found)\r\n\thttp://localhost:8080 (200 OK)\r\nIGNORE: Permissions Policy Header Not Set [10063] x 11 \r\n\thttp://localhost:8080 (200 OK)\r\n\thttp://localhost:8080/ (200 OK)\r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\n\thttp://localhost:8080/challenge/1 (200 OK)\r\n\thttp://localhost:8080/challenge/2 (200 OK)\r\nIGNORE: Modern Web Application [10109] x 11 \r\n\thttp://localhost:8080 (200 OK)\r\n\thttp://localhost:8080/ (200 OK)\r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\n\thttp://localhost:8080/challenge/1 (200 OK)\r\n\thttp://localhost:8080/challenge/2 (200 OK)\r\nIGNORE: Dangerous JS Functions [10110] x 1 \r\n\thttp://localhost:8080/webjars/jquery/3.6.3/jquery.js (200 OK)\r\nIGNORE: Loosely Scoped Cookie [90033] x 1 \r\n\thttp://localhost:8080/challenge/0 (200 OK)\r\n```\r\n\r\n### Any possible solutions?\r\n\r\nNeeds further investigation per issue.\r\n\r\n### If the bug is confirmed, would you be willing to submit a PR?\r\n\r\nYes\r\n", + "summary": "Investigate and address the findings from the ZAP baseline scan. Review the full ZAP report located in GitHub Actions. Each identified issue requires examination to determine if a fix is necessary or if it can be ignored.\n\n### Issues to Address:\n- **Information Disclosure - Suspicious Comments [10027]**: Review comments in the listed JavaScript files. 
Determine if sensitive information is being exposed.\n- **User Controllable HTML Element Attribute (Potential XSS) [10031]**: Analyze the specified endpoints for potential XSS vulnerabilities due to user input affecting HTML attributes.\n- **Non-Storable Content [10049]**: Assess the content that cannot be stored and evaluate if it impacts functionality or security.\n- **Cookie without SameSite Attribute [10054]**: Ensure cookies have the SameSite attribute set to prevent cross-site request forgery attacks.\n- **CSP: Wildcard Directive [10055]**: Modify Content Security Policy to eliminate wildcard directives for better security.\n- **Permissions Policy Header Not Set [10063]**: Implement the Permissions-Policy header to restrict features in the application.\n- **Modern Web Application [10109]**: Review compliance with modern web standards and practices.\n- **Dangerous JS Functions [10110]**: Identify and mitigate any risks associated with the use of dangerous JavaScript functions.\n- **Loosely Scoped Cookie [90033]**: Tighten cookie scope to enhance security.\n\n### Technical Steps:\n1. Access the ZAP report in GitHub Actions.\n2. For each issue, reproduce the findings locally for further analysis.\n3. Document the findings for each issue, noting whether a fix is required or if it can be ignored.\n4. Implement necessary fixes or updates in the application code.\n5. Test changes to ensure vulnerabilities are resolved.\n6. Prepare a pull request (PR) with all modifications for review.\n\nConfirm your willingness to submit a PR upon verification of the issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/709", @@ -8869,11 +9180,12 @@ "model": "github.issue", "pk": 312, "fields": { - "nest_created_at": "2024-09-11T19:41:47.000Z", - "nest_updated_at": "2024-09-11T19:41:47.000Z", + "nest_created_at": "2024-09-11T19:41:47Z", + "nest_updated_at": "2024-09-11T19:41:47Z", "node_id": "I_kwDOETRnAc5hKLH7", "title": "New Challenge: use weak KDF to protect a secret", "body": "### Context\r\n\r\nThis is a Docker challenge focused on using the wrong KDF to protect a secret. \r\nIn crypto-js there is an AES encryption mechanism, which uses MD5 as its KDF. This library is often used on mobile for encryption in hybrid apps. So what if we make a challenge in which the user has to find the right \"pin\"to be able to decrypt a secret offered on screen? (E.g. a 4-8 digit pin with md5 based KDF, and a secret fitting in 128 bytes.\r\nWe need to relate it to the MSTG on how to use (P)KDF with additional entropy and contextual binding.\r\n\r\n### Did you encounter this in real life? Could you tell us more about the scenario?\r\nSee https://github.com/brix/crypto-js/blob/c8a2312474ae60c823f3c00b4d7aac2da460bbfc/test/config-test.js for test defaults.\r\n\r\n", + "summary": "Create a Docker challenge that utilizes a weak Key Derivation Function (KDF) to protect a secret. Implement AES encryption using the crypto-js library, which employs MD5 as its KDF. The challenge should require users to discover the correct \"pin\" (a 4-8 digit numeric code) to decrypt a secret displayed on the screen, ensuring the secret does not exceed 128 bytes.\n\n1. Set up a Docker environment that includes the necessary dependencies for crypto-js.\n2. Implement the encryption mechanism using MD5 as the KDF for AES encryption.\n3. Generate a secret and encrypt it using the established mechanism.\n4. Design a user interface that prompts participants to input their guessed pin.\n5. 
Validate the pin against the KDF and provide feedback on success or failure.\n6. Reference the Mobile Security Testing Guide (MSTG) for guidelines on utilizing (P)KDF with additional entropy and contextual binding to deepen the challenge's complexity.\n\nFocus on ensuring that the challenge clearly illustrates the pitfalls of using weak KDFs in cryptographic implementations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/713", @@ -8904,6 +9216,7 @@ "node_id": "I_kwDOETRnAc5hQIIF", "title": "Optimize Github actions", "body": "In order to become more efficient in CLI time there are a bunch of things we can simplify:\r\n- [ ] 1. create 1 clean-install with all the sskips in place materials to have the jar file\r\n- [ ] 2. Change swagger action to depend on 1\r\n- [ ] 3. Change DAST to depend on 1\r\n- [ ] 4. Change Docker container creation action to depend on1\r\n- [ ] Optimize many of the github actions to just run once instead of twice during a PR.\r\n- [ ] quickfail container test if something is wrong...", + "summary": "Optimize GitHub Actions for increased efficiency in CLI time. Follow these steps:\n\n1. **Create a Clean Install**: Implement a single clean installation action that incorporates all necessary skips to generate the JAR file efficiently.\n \n2. **Modify Swagger Action**: Adjust the Swagger action to depend on the newly created clean install action to streamline the process.\n\n3. **Revise DAST Dependencies**: Update the Dynamic Application Security Testing (DAST) action to rely on the clean install action, ensuring it runs only after the successful creation of the JAR file.\n\n4. **Refactor Docker Container Creation**: Change the Docker container creation action to depend on the clean install, minimizing redundant builds.\n\n5. **Optimize Action Runs**: Review and modify existing GitHub actions to ensure they execute only once during a Pull Request instead of twice, reducing unnecessary processing time.\n\n6. **Implement Quick Fail for Container Tests**: Enhance the container testing process to include a quick fail mechanism that halts the test if any issues are detected early on.\n\nInitiate the optimization by prioritizing the creation of the clean install action, followed by updating dependencies for Swagger, DAST, and Docker actions. Then, assess the current action runs and implement the quick fail feature in the container tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/714", @@ -8934,6 +9247,7 @@ "node_id": "I_kwDOETRnAc5lTn1o", "title": "Nexus deployment credentials in settings.xml", "body": "This challenge is about packing and explaining why you should not have nexus deployment credentials in your github project hardcoded.\r\nHowever, we will not have an actual nexus setup right now. \r\n\r\nTODO's:\r\n\r\n- [x] - Create a settings.xml for Maven to be able to connect to an imaginary Nexus repo See https://github.com/sonatype/nexus-book-examples/blob/master/maven/settings/settings.xml as an example.\r\n- [x] - Implement a challenge that reads the actual challenge secret value from the settings.xml (see contributing.md for more details)", + "summary": "Create a `settings.xml` file for Maven to connect to a hypothetical Nexus repository without hardcoding deployment credentials in your GitHub project. Reference the example from the Sonatype Nexus book [here](https://github.com/sonatype/nexus-book-examples/blob/master/maven/settings/settings.xml). 
\n\nImplement a challenge that securely reads the challenge secret value from `settings.xml`. Follow the guidelines in `contributing.md` for specifics on reading the secret value.\n\nFirst steps to tackle the problem:\n1. Design the `settings.xml` structure, ensuring it contains placeholders for credentials that are not included directly in the GitHub repository.\n2. Develop a mechanism to read the secret credential values from the `settings.xml` during the build process, potentially utilizing environment variables or secure storage solutions like GitHub Secrets.\n3. Write documentation explaining the importance of not hardcoding sensitive information in source code, focusing on security best practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/810", @@ -8965,6 +9279,7 @@ "node_id": "I_kwDOETRnAc5lTp8Y", "title": "Add misconfiguration of docker secret in code (See for docker compose: https://docs.docker.com/engine/swarm/secrets/)", "body": "Create a challenge for docker compose setup, where the compose secret is hardcoded inside teh docker compose yml\r\n\r\nTODO:\r\n- [ ] Implement a docker-compose setup for wrongsecrets with the secret as an env var, add the compose file to the repo\r\n- [ ] implement the challenge according to contributing.md, but make sure we read the challenge from the file so we can play the challenge without the need to actually use teh docker-compose setup.", + "summary": "Create a Docker Compose challenge by misconfiguring Docker secrets in the `docker-compose.yml` file. Specifically, hardcode the secret as an environment variable within the Docker Compose setup. \n\nFollow these steps:\n\n1. **Implement Docker Compose Setup:**\n - Create a `docker-compose.yml` file that includes a service with a secret improperly defined as an environment variable.\n - Reference the Docker documentation for secrets to ensure the setup is clear: [Docker Secrets Documentation](https://docs.docker.com/engine/swarm/secrets/).\n\n2. **Add Compose File to Repository:**\n - Commit the `docker-compose.yml` file containing the misconfiguration to the GitHub repository.\n\n3. **Implement the Challenge:**\n - Follow the guidelines outlined in `contributing.md` for creating challenges.\n - Ensure the challenge can be executed without the need for the Docker Compose setup by reading the challenge from a file.\n\n4. 
**Test the Challenge:**\n - Verify that the challenge accurately reflects the misconfiguration situation and is playable as intended.\n\nThis will provide an educational challenge for users to learn about Docker secrets and their correct implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/811", @@ -8996,6 +9311,7 @@ "node_id": "I_kwDOETRnAc5lTqhP", "title": "Add misconfiguration for mounting in secret in during build: https://docs.docker.com/engine/reference/commandline/buildx_build/", "body": "This challenge is about using docker secrets from docker buildx buildpacks:\r\n\r\nUse the --secret, but then with a hardcoded value referenced in the shell script to publish the docker container and explain that using --secret is a good idea, but not with a hardcoded call in a git-comitted buildscript.\r\n\r\nTodo:\r\n- [ ] Embed the secret variable in https://github.com/OWASP/wrongsecrets/blob/master/.github/scripts/docker-create.sh and make sure it lands in a file in the docker container\r\n- [ ] create a challenge that reads the secret from that file and teaches why this is a bad idea (See contributing.md)", + "summary": "Implement misconfiguration for mounting secrets during Docker build with buildx. Reference the Docker build documentation for proper usage of the `--secret` flag.\n\n1. Modify the script located at `https://github.com/OWASP/wrongsecrets/blob/master/.github/scripts/docker-create.sh` to embed a secret variable using the `--secret` option.\n2. Ensure that the secret is written to a file inside the Docker container during the build process.\n3. Create a challenge that reads the secret from the specified file, demonstrating the risks associated with hardcoding secrets in version-controlled build scripts.\n\nFirst steps:\n- Review the current script to identify where secrets are being handled.\n- Integrate the `--secret` flag into the build command.\n- Create a temporary file in the Docker container to store the secret.\n- Develop a separate challenge that educates users on the dangers of hardcoding secrets, in accordance with the guidelines outlined in `contributing.md`.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/812", @@ -9025,6 +9341,7 @@ "node_id": "I_kwDOETRnAc5wlGJO", "title": "Have a challenge with a backup bucket containing the secret", "body": "### Context\r\n\r\n- What should the challenge scenario be like?\r\nHave a backup s3/storage bucket with a private ed25519 key publicly exposed \r\n- What should the participant learn from completing the challenge?\r\nSecure your backup at all cost\r\n- For what category would the challenge be? (e.g. Docker, K8s, binary)\r\nDocker/cloud depending on how we implement the backup solution\r\n\r\nActions:\r\n- [ ] create separate Terraform folder to have an S3 bucket (in our AWS folder) under the name \"backupchallenge\"\r\n- [ ] have the key copying logic in a shell script using AWS CLI as part of the backupchallenge folder\r\n- [ ] implement the challenge according to contributing.md and make sure you hide the key in your classfile.\r\n", + "summary": "**Create a Backup Challenge for Secure Storage of Secrets**\n\n1. **Define the Challenge Scenario**: Set up a backup S3/storage bucket that contains a private ed25519 key, which is intentionally exposed to test security practices.\n\n2. 
**Establish Learning Objectives**: Ensure participants understand the importance of securing backup data and implementing best practices for secret management.\n\n3. **Categorize the Challenge**: Classify the challenge under Docker/cloud technologies, based on the implementation of the backup solution.\n\n**Next Steps**:\n\n- **Create Infrastructure**: \n - Set up a new folder in Terraform specifically for this challenge.\n - In this folder, create an S3 bucket named \"backupchallenge\" within the existing AWS folder structure.\n\n- **Implement Backup Logic**:\n - Develop a shell script that utilizes the AWS CLI to handle the logic for copying the private key into the S3 bucket as part of the backup process.\n\n- **Follow Contribution Guidelines**: \n - Implement the challenge as outlined in the contributing.md document.\n - Ensure that the private key is hidden in your class file to prevent exposure.\n\nBy following these steps, you will create a valuable learning experience focused on securing sensitive information in cloud storage.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/982", @@ -9054,6 +9371,7 @@ "node_id": "I_kwDOETRnAc54_uwV", "title": "performance for the main screen load", "body": "Improve the performance for the main screen loading by:\r\n\r\n- [ ] pre-compiling the content for the table as much as possible\r\n- [ ] Have a minification step for all the js/css offered (see https://pagespeed.web.dev/analysis/https-wrongsecrets-herokuapp-com/xltdvc76x1?form_factor=mobile)\r\n- [ ] find a way to strip css for bootstrap that we will never use?\r\n- [ ] load all computed integers from cache (max score)\r\n- [ ] stop blocking of rendering\r\n- [ ] enable virtual threads", + "summary": "Enhance the main screen loading performance by implementing the following steps:\n\n1. Pre-compile content for the table to reduce rendering time. Investigate the use of static site generation or server-side rendering techniques to deliver pre-rendered HTML.\n\n2. Implement a minification step for all JavaScript and CSS files. Utilize tools like UglifyJS for JS and CSSNano for CSS to minimize file sizes, which can decrease loading times. Reference the PageSpeed insights for further optimization.\n\n3. Analyze Bootstrap usage and identify unused CSS. Employ tools such as PurgeCSS to remove unused styles from the production build, thereby reducing the overall CSS footprint.\n\n4. Optimize caching strategies for computed integers. Investigate caching mechanisms (e.g., Redis or in-memory caching) to store frequently accessed data and minimize computation on subsequent loads.\n\n5. Review the rendering pipeline to eliminate any blocking resources that hinder page rendering. Consider deferring non-essential JavaScript and utilizing asynchronous loading techniques.\n\n6. Explore enabling virtual threads to improve concurrency and resource management within the application. Investigate the compatibility of your current environment with Project Loom or similar threading models.\n\nBegin by profiling the existing loading performance using tools such as Google Lighthouse or WebPageTest to establish a baseline and identify specific bottlenecks. 
Then, prioritize the above tasks based on their potential impact on performance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/1115", @@ -9083,6 +9401,7 @@ "node_id": "I_kwDOETRnAc5-lrGO", "title": "Kubernetes ephemeral container to extract secret cached by subprocess", "body": "### Context\r\n\r\n- What should the challenge scenario be like?\r\nWe're interfacing with a secrets mgmt system to keep our secret safe, but we don't want to make a network call every time we use the secret. We've built an abstraction to handle interfacing with the system, and for performance reasons, we're caching the secret in memory using a spawned subprocess (or in memory). Using an appropriate container with debugging tools (jmap?), we can extract the secret from the subprocess/the heap!\r\n\r\n- What should the participant learn from completing the challenge?\r\nBeing able to exec in prod can harm even relatively safe secrets. Also, be careful with debug modes 🤡 \r\n\r\n- For what category would the challenge be? (e.g. Docker, K8s, binary)\r\nK8s\r\n\r\n### Did you encounter this in real life? Could you tell us more about the scenario?\r\n\r\nI had to attach a debug ephemeral container to a running one, which had multiple debugging tools installed.\r\n\r\n### If the challenge request is approved, would you be willing to submit a PR?\r\n\r\nYes\r\n", + "summary": "Create a Kubernetes ephemeral container to extract a secret cached by a subprocess. \n\n### Technical Details\n- Design a challenge scenario where a secrets management system is interfaced to retrieve secrets, avoiding multiple network calls by caching secrets in a subprocess.\n- Implement an ephemeral container equipped with debugging tools (e.g., `jmap`) to access the subprocess's memory and extract the cached secret.\n- Highlight the risks of executing commands in a production environment, emphasizing the potential exposure of even safe secrets and the implications of enabling debug modes.\n\n### First Steps\n1. Define the requirements for the ephemeral container, including necessary debugging tools.\n2. Set up a Kubernetes environment to deploy the application with the caching mechanism.\n3. Enable ephemeral containers in the Kubernetes cluster configuration.\n4. Develop a procedure to attach the ephemeral container to the running application pod.\n5. Test the extraction of the cached secret using the debugging tools from the ephemeral container.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/1236", @@ -9113,6 +9432,7 @@ "node_id": "I_kwDOETRnAc5_7_Qc", "title": "Have a .net dll based RE challenge!", "body": "### Context\r\n\r\nDLLs and exes can be hard to debug. What about hidden secrets over there?\r\n\r\nCan we have a challenge whiich is cross-compiled using .Net runtime and a few DLLs to have a secret inside similar like the golang/rust challenges?\r\n\r\n- [x] Add .net based binary to the [wrongsecrets/binaries ](https://github.com/OWASP/wrongsecrets-binaries)repowith crosscompiling for the various OSes\r\n- [ ] Add a challenge here that uses the binary (See contributing.md and the code of the other binary challenges).", + "summary": "Create a .NET DLL-based reverse engineering challenge. \n\n1. Cross-compile a .NET binary using the .NET runtime for multiple operating systems.\n2. Integrate the compiled DLL into the [wrongsecrets/binaries](https://github.com/OWASP/wrongsecrets-binaries) repository.\n3. 
Develop a challenge that utilizes the binary, following the guidelines in contributing.md and examining the structure of existing binary challenges.\n\nFirst steps:\n- Set up a development environment with .NET SDK.\n- Use `dotnet publish` to cross-compile the binary for different OSes.\n- Create a new challenge file that references the compiled DLL and includes the challenge description and solution.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/1243", @@ -9145,6 +9465,7 @@ "node_id": "I_kwDOETRnAc6GX1Qz", "title": "Goerli testnet is being deprecated: help us migrate to another testnet!", "body": "Challenge 25, 26, and 27 are based on the Goerli testnet, which is being deprecated. We need to redo these challenges on another testnet.", + "summary": "Migrate challenges 25, 26, and 27 from the deprecated Goerli testnet to a new testnet. \n\n1. Identify a suitable alternative testnet, such as Sepolia or Mumbai.\n2. Update the smart contracts and deployment scripts to be compatible with the chosen testnet.\n3. Modify any relevant configurations in the project to point to the new testnet.\n4. Test the deployment and functionality of the challenges on the new testnet.\n5. Document the migration process for future reference. \n\nEnsure all dependencies and libraries are compatible with the selected testnet during the migration.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wrongsecrets/issues/1360", @@ -9171,10 +9492,11 @@ "pk": 322, "fields": { "nest_created_at": "2024-09-11T19:43:35.817Z", - "nest_updated_at": "2024-09-12T00:14:58.204Z", + "nest_updated_at": "2024-09-13T17:00:49.923Z", "node_id": "I_kwDOERPzhs6EXuue", "title": "Field \"Vulnerability Mapping\"", "body": "Hi,\r\n\r\nIt is normal that I do not found the field **Vulnerability Mapping** in the JSON returned?\r\n\r\n👀 View from the web:\r\n![image](https://github.com/OWASP/cwe-tool/assets/1573775/35fdbc3c-c48d-4820-8431-da2f47a848ef)\r\n\r\n👀 View from the JSON:\r\n![image](https://github.com/OWASP/cwe-tool/assets/1573775/0110bf3b-04c5-4c36-a345-70bcce48df54)\r\n\r\n📖 JSON file:\r\n[cwe.json](https://github.com/OWASP/cwe-tool/files/14839275/cwe.json)\r\n\r\n💻 Version used:\r\n![image](https://github.com/OWASP/cwe-tool/assets/1573775/d44efe13-0c23-4b3c-a982-fb4deeae2da6)\r\n\r\n🤔 Did I miss something?\r\n\r\n😉 Thanks a lot in advance for your help.\r\n\r\n\r\n", + "summary": "The issue raised concerns the absence of the \"Vulnerability Mapping\" field in the JSON response compared to what is displayed on the web interface. The user provided screenshots for both views and referenced a specific JSON file for further context. They are seeking clarification on whether this omission is expected or if there is something they might have overlooked. 
\n\nTo resolve this, it may be necessary to investigate the JSON structure and confirm if the \"Vulnerability Mapping\" field should indeed be included or if there are updates required to align the JSON output with the web display.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/cwe-tool/issues/29", @@ -9197,10 +9519,11 @@ "pk": 323, "fields": { "nest_created_at": "2024-09-11T19:44:18.875Z", - "nest_updated_at": "2024-09-11T19:44:18.875Z", + "nest_updated_at": "2024-09-13T15:15:55.661Z", "node_id": "I_kwDOEQbdus5_PwOh", "title": "Website shows ancient WIP Draft Chapter Policy", "body": "When browsing on the OWASP website (owasp.org) and you browse to chapter policies\r\n\r\nAbout->Policies\r\n\r\nThen select [Chapters Policy](https://owasp.org/www-policy/operational/chapters)\r\n\r\nYou will notice on the top right a \"next\" selection entitled \"Chapters Policy - Draft (WIP)\"\r\n\r\nWhen selecting this entry you will be sent to a page that is out of date and states:\r\n\r\n\"Members are invited to provide feedback on this draft policy until January 22, 1970\"\r\n\r\nThis page needs to be removed or at least updated.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-chapter/issues/24", @@ -9227,6 +9550,7 @@ "node_id": "MDU6SXNzdWU4OTE5Nzg5MzE=", "title": "Integrate threat engine with Cornucopia / EoP cards", "body": "TD suggests STRIDE when adding threats to the data flow diagram, and one idea is that when one of STRIDE categories is suggested by TD, then the default description could have a link to the specific EoP suit (so for example if it is Repudiation then we could link to the EoP Repudiation suit).\r\n\r\nThis would need the EoP to be split out into the individual suits (Spoofing, Tampering, Repudiation, Information disclosure, Denial of service, Elevation of privilege), subject of https://github.com/adamshostack/eop/issues/3", + "summary": "Integrate the threat engine with Cornucopia and EoP cards. Use STRIDE methodology for adding threats to the data flow diagram. When a STRIDE category is identified by the threat detection (TD), implement a default description that links to the corresponding EoP suit. For instance, if the threat is Repudiation, link it to the EoP Repudiation suit.\n\nFirst steps to tackle the problem:\n\n1. Review STRIDE categories and their definitions.\n2. Identify the EoP suits (Spoofing, Tampering, Repudiation, Information Disclosure, Denial of Service, Elevation of Privilege).\n3. Split the EoP content into individual suits as mentioned in issue #3 of the EoP repository.\n4. Develop a mapping mechanism that connects STRIDE threats to the appropriate EoP suits.\n5. Update the data flow diagram generation logic to include these links in the default descriptions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/140", @@ -9241,13 +9565,13 @@ "author": 43, "repository": 502, "assignees": [ - 43, - 113 + 113, + 43 ], "labels": [ - 71, 72, - 73 + 73, + 71 ] } }, @@ -9260,6 +9584,7 @@ "node_id": "MDU6SXNzdWU5Mzk1NjU3NzY=", "title": "Add templates to the list of sample threat models", "body": "Add feature to import a threat template or a list of pre defined threats. Seems ... chaotic to have to re-add threats for every model that you design. especially if designing a lot of threat models. When reusing components/elements, you will encounter the same threats... this tool should scale to reuse predefined components/elements and threats. 
Process of generating threat list is way too manual.", + "summary": "Add templates for sample threat models to streamline threat modeling. Implement an import feature for threat templates or predefined threat lists to reduce redundancy in threat definitions across multiple models. \n\n1. Analyze the current threat modeling process to identify repetitive tasks related to threat addition.\n2. Design a user-friendly interface for importing threat templates.\n3. Develop a backend mechanism to store and retrieve these templates efficiently.\n4. Ensure the system allows for easy integration of predefined threats with existing models.\n5. Create documentation to guide users on how to utilize the new import feature and templates.\n\nFocus on enhancing scalability and reducing manual effort in generating threat lists.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/220", @@ -9275,8 +9600,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 73 + 73, + 71 ] } }, @@ -9289,6 +9614,7 @@ "node_id": "MDU6SXNzdWU4OTE5Nzc3MTI=", "title": "Threat generation for more than one element", "body": "**Is your feature request related to a problem? Please describe.**\r\nIt's frustrating that you have to choose each element and generate threats for each and everyone of them.\r\n\r\n**Describe the solution you'd like**\r\nAbility to choose more than one, possibly all elements and generate threats. That way I could generate the standard threats, which should be put in an editable library btw, and then get the responsible developers to do a review. In that way it's a great tool for awareness and learning as well.\r\n\r\n**Describe alternatives you've considered**\r\nAt the moment I've made a list of all standard mitigations and \"threat descriptions\" in the form of CAPEC and/or CWE references in the project. I draw the model, go through all of the elements and generate standard threats, then go through the json file to manually search and replace.\r\n\r\n**Additional context**\r\nNope\r\n\r\n", + "summary": "Implement a feature that allows users to select multiple elements simultaneously for threat generation. \n\n1. Identify the current process of selecting individual elements and generating threats for each.\n2. Develop a multi-selection interface in the user interface to allow bulk selection of elements.\n3. Modify the threat generation algorithm to process multiple elements at once, producing a comprehensive list of threats.\n4. Create an editable library for standard threats so users can customize and categorize threats as needed.\n5. 
Implement a review mechanism for developers to evaluate the generated threats against their specific elements.\n\nConsider creating a prototype for the multi-selection UI and testing it with a subset of elements to refine the user experience.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/132", @@ -9304,9 +9630,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 72, - 73 + 73, + 71 ] } }, @@ -9315,10 +9641,11 @@ "pk": 327, "fields": { "nest_created_at": "2024-09-11T20:08:07.376Z", - "nest_updated_at": "2024-09-11T20:08:07.376Z", + "nest_updated_at": "2024-09-13T03:53:38.188Z", "node_id": "MDU6SXNzdWU4NjYyODA3OTU=", "title": "Support Azure OAuth", "body": "**Describe what problem your feature request solves**\r\nAdd ability to use multiple passport connectors, such as Azure AD.\r\n\r\n**Describe the solution you'd like**\r\nI'd like to add the Azure AD Passport connector for authentication/authorization. I _think_ it would make the most sense to have this be enabled via configuration, and also allow for multiple passport strategies. For example: `export PASSPORT_STRATEGIES=\"['github', 'azuread-openidconnect']\"` or `export PASSPORT_STRATEGIES=\"['github']\"`\r\nThis would open up the possibility of adding other passport strategies/connectors in the future.\r\n\r\n\r\n**Additional context**\r\nSome questions I have:\r\n- Because Azure AD requires a bunch more config options, would it make sense to implement something like [dotenv](https://github.com/motdotla/dotenv) to make configuration a bit easier?\r\n - If so, should that be a separate feature / PR to keep the scope somewhat limited?\r\n- Does it make sense to restructure how the passport authentication is configured to make it easier to add other authentication types in the future?\r\n\r\nI'm eager to add this functionality, but wanted to see if it's a desired feature, and what the appetite is as far as the size and scope of pull requests.", + "summary": "Add Azure OAuth support by implementing the Azure AD Passport connector for authentication and authorization. Enable configuration for multiple passport strategies, allowing for flexibility in authentication methods. Use environment variable `PASSPORT_STRATEGIES` to specify active strategies, e.g., `export PASSPORT_STRATEGIES=\"['github', 'azuread-openidconnect']\"`.\n\nConsider the following steps to tackle the problem:\n\n1. **Research Azure AD Integration**: Review Azure AD documentation to understand required configuration and authentication flows.\n \n2. **Implement Configuration Management**: Decide whether to integrate a solution like `dotenv` for easier configuration management. If chosen, create a separate issue or PR to maintain focus.\n\n3. **Refactor Passport Authentication Setup**: Analyze the current passport authentication setup and restructure it to facilitate the addition of new strategies in the future.\n\n4. **Develop and Test**: Code the Azure AD Passport connector, ensuring it adheres to existing frameworks and conventions. Conduct thorough testing for both Azure AD and existing strategies.\n\n5. 
**Documentation**: Update documentation to include instructions for configuring Azure AD and using multiple passport strategies.\n\nEngage with the community to gauge interest and feedback on the feature's scope before proceeding with the implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/61", @@ -9329,16 +9656,13 @@ "comments_count": 14, "closed_at": null, "created_at": "2021-04-23T16:55:51Z", - "updated_at": "2024-03-23T18:47:48Z", + "updated_at": "2024-09-12T08:01:18Z", "author": 136, "repository": 502, - "assignees": [ - 137 - ], + "assignees": [], "labels": [ 71, - 74, - 75 + 74 ] } }, @@ -9351,6 +9675,7 @@ "node_id": "MDU6SXNzdWU4NzA5MDAxMjE=", "title": "provide API for CI/CD Pipelines", "body": "**Describe what problem your feature request solves**\r\nProvide an API for CI/CD pipelines\r\n\r\n**Describe the solution you'd like**\r\nProvide an API for CI/CD pipelines, [see here](https://github.com/bbachi/vuejs-nodejs-example/tree/master/api) for an example\r\n\r\n**Additional context**\r\n- What functions are exposed for this API?\r\n- How do we handle authentication/authorization?\r\n- What do we use to document the API? (main github docs, auto-generated via swagger or apidoc?)\r\n", + "summary": "Implement an API for CI/CD pipelines. \n\n1. Define the problem of lacking an API for seamless CI/CD integration.\n2. Reference the provided example API [here](https://github.com/bbachi/vuejs-nodejs-example/tree/master/api) for guidance on structure and functionality.\n3. Identify the essential functions to expose through the API, such as triggering builds, retrieving build statuses, and managing pipeline configurations.\n4. Determine the approach for handling authentication and authorization—consider OAuth2 or API tokens for secure access.\n5. Decide on documentation methods. Evaluate options like using the main GitHub documentation, or auto-generating documentation with tools like Swagger or Apidoc.\n\nFirst steps:\n- Conduct a requirements analysis to finalize the API endpoints.\n- Set up a project structure and choose a technology stack (e.g., Node.js, Express).\n- Implement basic authentication and authorization mechanisms.\n- Begin developing the API endpoints, starting with the most critical functionalities.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/88", @@ -9366,8 +9691,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 76 + 76, + 71 ] } }, @@ -9380,6 +9705,7 @@ "node_id": "MDU6SXNzdWU5Mjc4NDAyMTg=", "title": "Text Search", "body": "**Describe what problem your feature request solves**\r\nAbility to search for elements based on user-defined text (element type, description, etc)\r\n\r\n**Describe the solution you'd like**\r\nWhen viewing a diagram, I'd like a search option to be able to find only UI elements that include the given search term. \r\n\r\n**Additional context**\r\nTo start with, this can be a simple text search that searches all element types and all fields but expanded on later for more \"advanced\" search features (only certain types, fields, etc).\r\n\r\nThis improvement was suggested by Simon S \r\n\r\nIt would be good to be able to search for any text within the model\r\nNote that the search box has been temporarily removed via merge #502 \r\n", + "summary": "Implement a text search feature for diagrams to enable users to locate UI elements based on user-defined text criteria such as element type and description. \n\n1. 
Create a search input field within the diagram view.\n2. Implement a basic search function that scans all element types and fields for the specified search term.\n3. Ensure the search results highlight or filter the UI elements that match the search criteria.\n4. Consider future enhancements, such as limiting searches to specific element types or fields for a more refined search experience.\n\nNote that the search box is currently absent due to merge #502, so address any compatibility issues during implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/194", @@ -9395,8 +9721,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 75 + 75, + 71 ] } }, @@ -9409,6 +9735,7 @@ "node_id": "I_kwDOEAWEP85DBN_r", "title": "MITRE Att&ck integration with STRIDE threats", "body": "Describe what problem your feature request solves:\r\nCurrently the tool does a good job for decomposing the architecture and applying threats according to the STRIDE framework. This still leaves some gaps to determine the cybersecurity requirements that the process needs to adhere to remediate identified threats.\r\n\r\nDescribe the solution you'd like:\r\nThe solution I am proposing is that we can create an additional layer of taxonomy of threats under STRIDE to identify the attackers tactics and techniques which might be used to identify the right defenses. These can become the cybersecurity requirements for the process that we are threat modelling.\r\n\r\nThis combined process for threat modeling can be like:\r\nThe first step is to identify the process, map out the dataflows and interactions between them and the trust boundaries.[Threat dragon is capable of this]\r\n\r\nSecond, for each of the subsystems, enumerate a STRIDE matrix listing the mnemonics. \r\nThird, the 12 ATT&CK tactics are tallied. Enumerated tactics are:\r\n• Initial Access\r\n• Execution\r\n• Persistence\r\n• Privilege Escalation\r\n• Defense Evasion\r\n• Credential Access\r\n• Discovery\r\n• Lateral Movement\r\n• Collection\r\n• Command and Control\r\n• Exfiltration\r\n• Impact\r\n\r\nIn Step 4, for each of the tactics within each of the STRIDE mnemonics, the applicable techniques are evaluated. For instance, for the STRIDE mnemonic of spoofing, the 12 tactics are evaluated for ATT&CK threat techniques that could result in spoofing against authenticity. In other words, Steps 2 through 4 are a process of elimination. \r\n\r\nAdvantages:\r\n1. Using consistent semantics and vocabulary to communicate threats.\r\n2. Understanding the adversary tactics which helps visualize the defenses to those threats.\r\n3. Use open source framework created by MITRE to educate the development teams for various threats and associated remediations.\r\n4. Identify cybersecurity requirements that help defend against multiple threats. [e.g] Preventing Initial access can remediate other threats that are not directly related. \r\n\r\n\r\nReferences:\r\nhttps://blog.isc2.org/isc2_blog/2020/02/under-attack-how-mitres-methodology-to-find-threats-and-embed-counter-measures-might-work-in-your-or.html\r\n![image](https://user-images.githubusercontent.com/96544391/152564903-135ece7a-6ea5-4840-907f-77f90c7aeb6c.png)\r\n\r\n\r\n", + "summary": "Integrate MITRE ATT&CK with STRIDE threats to enhance threat modeling capabilities. Address the gaps in identifying cybersecurity requirements for remediating threats by proposing an additional taxonomy layer under STRIDE.\n\n1. 
Identify the process and map dataflows and interactions, including trust boundaries. Utilize Threat Dragon for this task.\n2. Enumerate a STRIDE matrix for each subsystem, listing relevant mnemonics.\n3. Tally the 12 ATT&CK tactics: Initial Access, Execution, Persistence, Privilege Escalation, Defense Evasion, Credential Access, Discovery, Lateral Movement, Collection, Command and Control, Exfiltration, and Impact.\n4. Evaluate applicable techniques for each tactic within the STRIDE mnemonics, focusing on techniques that could lead to specific threats, such as spoofing.\n\nAdvantages include:\n- Establishing a consistent vocabulary for discussing threats.\n- Enhancing understanding of adversary tactics and aligning defenses.\n- Utilizing the MITRE framework to educate development teams on threats and remediations.\n- Identifying cybersecurity requirements that provide broader defense mechanisms.\n\nStart by mapping current processes and identifying areas where the integration can be implemented effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/361", @@ -9424,8 +9751,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 77 + 77, + 71 ] } }, @@ -9438,6 +9765,7 @@ "node_id": "I_kwDOEAWEP85DVzt0", "title": "desktop command line interface", "body": "**Describe what problem your feature request solves**\r\nVersion 1.x of Threat Dragon has a CLI - this needs to be migrated to version 2.0\r\n\r\n**Describe the solution you'd like**\r\nImplement CLI for version 2.0 using version 1.x as an example\r\n\r\n**Additional context**\r\nVersion 1.x used the [yargs package](https://www.npmjs.com/package/yargs) to supply the CLI\r\n", + "summary": "Migrate the Threat Dragon CLI from version 1.x to version 2.0. Utilize the existing CLI implementation in version 1.x as a reference for development.\n\n1. Review the CLI structure and commands in Threat Dragon version 1.x.\n2. Install the `yargs` package in version 2.0 to manage command-line arguments.\n3. Define the command structure and options for version 2.0, ensuring compatibility with new features.\n4. Implement the CLI commands, leveraging the existing logic from version 1.x where applicable.\n5. Test the new CLI implementation thoroughly to ensure all commands function as intended.\n\nConsider potential changes in the underlying architecture of version 2.0 that may affect the CLI functionality and address them during implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/372", @@ -9453,10 +9781,10 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 78, + 80, 79, - 80 + 78, + 71 ] } }, @@ -9469,6 +9797,7 @@ "node_id": "I_kwDOEAWEP85DzfMt", "title": "Resize Diagram or Export to PNG", "body": "**Describe what problem your feature request solves**\r\nI don't currently have a way to share a large threat model with non-git stakeholders.\r\n\r\n**Describe the solution you'd like**\r\nAs a Cloud Security Engineer, I would like the ability to export a threat model outside of threat dragon so that I can share a diagram with stakeholders that are not currently using threat dragon.\r\n\r\n**Additional context**\r\nMVP - Desktop version could use a resize function \r\nLong term - Exports options for different formats\r\n", + "summary": "Implement a feature to resize diagrams or export them as PNG files in Threat Dragon. This addresses the need for Cloud Security Engineers to share large threat models with non-Git stakeholders effectively.\n\n1. 
Develop a resizing function for diagrams in the desktop version of Threat Dragon, allowing users to adjust dimensions for better visibility and usability.\n2. Create an export feature that enables saving diagrams in PNG format, ensuring high-quality sharing options for stakeholders.\n3. Consider long-term enhancements by exploring additional export options for various formats (e.g., PDF, SVG) to cater to different user needs.\n\nBegin by reviewing the existing diagram rendering codebase to identify how resizing can be implemented. Create a prototype for the resizing functionality, then integrate the export feature for PNG format, ensuring compatibility with current diagram structures. Test the implementation thoroughly before deploying.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/385", @@ -9484,8 +9813,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 77 + 77, + 71 ] } }, @@ -9494,10 +9823,11 @@ "pk": 333, "fields": { "nest_created_at": "2024-09-11T20:08:19.449Z", - "nest_updated_at": "2024-09-11T20:08:19.450Z", + "nest_updated_at": "2024-09-13T15:17:01.472Z", "node_id": "I_kwDOEAWEP85Hd-OR", "title": "provide dark mode for desktop", "body": "**Describe what problem your feature request solves**\r\nIt would be good to allow the use of Dark Mode in the desktop. This is already provided in the web application by the browser\r\n\r\n**Describe the solution you'd like**\r\nFollow this tutorial : https://www.electronjs.org/docs/latest/tutorial/dark-mode\r\n\r\n**Additional context**\r\n", + "summary": "Implement dark mode for the desktop application. This feature will enhance user experience by providing a visually comfortable interface, similar to what is already available in the web application.\n\n1. Reference the Electron documentation on dark mode: https://www.electronjs.org/docs/latest/tutorial/dark-mode.\n2. Investigate how to integrate dark mode support into the existing desktop application.\n3. Update the application settings to allow users to toggle dark mode on and off.\n4. Test the implementation across different operating systems to ensure compatibility.\n5. Gather user feedback to refine and improve the dark mode feature.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/404", @@ -9508,12 +9838,10 @@ "comments_count": 4, "closed_at": null, "created_at": "2022-04-10T13:44:39Z", - "updated_at": "2024-07-08T17:01:39Z", + "updated_at": "2024-09-12T08:02:59Z", "author": 43, "repository": 502, - "assignees": [ - 130 - ], + "assignees": [], "labels": [ 71, 78 @@ -9529,6 +9857,7 @@ "node_id": "I_kwDOEAWEP85H7-Or", "title": "desktop about-box is MacOS only", "body": "**Describe what problem your feature request solves**\r\nThe desktop drop down menu role: 'about' is probably MacOS only\r\n\r\n**Describe the solution you'd like**\r\nAdd in code like this, to make it cross platform:\r\n```\r\n {\r\n label: 'about Electron',\r\n // Set menu roles\r\n role: 'about', // about, this value is only for Mac OS X systems\r\n // Click event is invalid when the role attribute of click event can be recognized\r\n click: () => {\r\n var aboutWin = new BrowserWindow({ width: 300, height: 200, parent: win, modal: true });\r\n aboutWin.loadFile('about.html');\r\n }\r\n },\r\n```\r\n\r\n**Additional context**\r\n", + "summary": "Implement cross-platform support for the desktop drop-down menu role: 'about'. 
Currently, this feature is exclusive to MacOS.\n\n**Proposed Solution:**\nIntegrate the following code snippet to ensure compatibility across different operating systems:\n\n```javascript\n{\n label: 'about Electron',\n role: 'about', // Note: This role is only recognized on MacOS\n click: () => {\n var aboutWin = new BrowserWindow({ width: 300, height: 200, parent: win, modal: true });\n aboutWin.loadFile('about.html');\n }\n},\n```\n\n**First Steps to Tackle the Problem:**\n1. Review the current implementation of the 'about' role in the Electron desktop application.\n2. Modify the menu creation logic to include a conditional check for the operating system.\n3. Create an `about.html` file if it doesn't exist, ensuring it contains relevant information about the application.\n4. Test the implementation on both MacOS and other operating systems to confirm functionality.\n5. Document the changes in the project’s README or relevant documentation to guide future contributions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/421", @@ -9544,10 +9873,10 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 78, 81, - 82 + 82, + 78, + 71 ] } }, @@ -9560,6 +9889,7 @@ "node_id": "I_kwDOEAWEP85JpJwG", "title": "Support storage backends other than github", "body": "**Describe what problem your feature request solves**\r\nInstead of using github repositories for storage of threat models, I'd like to store them all in an alternative centralized storage mechanism (I have a strong affinity to AWS, so s3 would be my preference)\r\n\r\n**Describe the solution you'd like**\r\nIntroduce a storage mechanism configuration and support options like which cloud provider (aws, azure, google) and any relevant configuration options for each one (for AWS, this would probably just be bucket name and region). Of course, this introduces a new dependency on IAM credentials for the service, which I'd say is an exercise left to the reader (I'd personally launch threat-dragon as a containerized workload in AWS and attach an IAM role, but others assumedly would want to set AWS_ACCESS_KEY_ID etc.)\r\n", + "summary": "Implement support for alternative storage backends beyond GitHub for threat models. Focus on integrating centralized storage solutions, specifically targeting AWS S3 as a primary option.\n\n1. **Identify Requirements**: Define the necessary configuration options for various cloud providers, including AWS, Azure, and Google Cloud. For AWS, specify parameters like bucket name and region.\n\n2. **Develop Configuration Interface**: Create a configuration interface that allows users to select their preferred cloud provider and input required settings. Ensure this interface can handle varying configurations based on the selected provider.\n\n3. **Manage IAM Credentials**: Address the dependency on IAM credentials. Offer guidance on best practices for credential management, such as using IAM roles for containerized deployments on AWS or environment variables for local setups.\n\n4. **Implement Storage Logic**: Develop the backend logic to interact with the selected cloud storage solution. This includes creating, reading, updating, and deleting threat models stored in S3 or other supported providers.\n\n5. **Testing and Validation**: Test the new storage backends thoroughly to ensure reliability and performance. Validate that the configurations work seamlessly across different cloud environments.\n\n6. 
**Documentation**: Update documentation to include instructions on how to configure and use the new storage options, including examples for each supported provider.\n\nBegin by reviewing the existing storage implementation, then outline the architectural changes needed to support multiple cloud providers.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/426", @@ -9577,9 +9907,9 @@ 141 ], "labels": [ - 71, 74, - 75 + 75, + 71 ] } }, @@ -9588,10 +9918,11 @@ "pk": 336, "fields": { "nest_created_at": "2024-09-11T20:08:26.685Z", - "nest_updated_at": "2024-09-11T20:08:26.685Z", + "nest_updated_at": "2024-09-13T03:53:41.018Z", "node_id": "I_kwDOEAWEP85NKG0E", "title": "Replace veux with pinia", "body": "**Describe what problem your feature request solves**\r\n[Vuex](https://vuex.vuejs.org/) is now in maintenance mode. It still works, but will no longer receive new features. It is recommended to use Pinia for new applications.\r\n\r\n**Describe the solution you'd like**\r\nReplace Veux with Pinia\r\n\r\n**Additional context**\r\nhttps://vuejs.org/guide/scaling-up/state-management.html#pinia\r\n", + "summary": "Replace Veux with Pinia to modernize state management in the application. Vuex is currently in maintenance mode and will not receive new features, making it imperative to transition to Pinia for ongoing support and enhancements.\n\n**First Steps:**\n1. Install Pinia via npm:\n ```bash\n npm install pinia\n ```\n2. Set up Pinia in your main application file, typically `main.js` or `main.ts`:\n ```javascript\n import { createPinia } from 'pinia';\n import { createApp } from 'vue';\n\n const app = createApp(App);\n const pinia = createPinia();\n app.use(pinia);\n app.mount('#app');\n ```\n3. Migrate existing Vuex store modules to Pinia stores. For each Vuex module, create a corresponding Pinia store using the `defineStore` function:\n ```javascript\n import { defineStore } from 'pinia';\n\n export const useMyStore = defineStore('myStore', {\n state: () => ({\n // state properties\n }),\n actions: {\n // action methods\n },\n getters: {\n // getter methods\n },\n });\n ```\n4. Replace Vuex store calls with Pinia store calls in your components. Update state access, actions, and getters accordingly.\n\n5. Test the application thoroughly to ensure that the migration did not introduce any regressions.\n\nBy following these steps, transition to Pinia will enhance performance and future-proof the application's state management.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/442", @@ -9602,15 +9933,12 @@ "comments_count": 2, "closed_at": null, "created_at": "2022-07-05T16:06:24Z", - "updated_at": "2024-08-23T09:49:54Z", + "updated_at": "2024-09-12T08:01:48Z", "author": 43, "repository": 502, - "assignees": [ - 142 - ], + "assignees": [], "labels": [ - 71, - 73 + 71 ] } }, @@ -9619,10 +9947,11 @@ "pk": 337, "fields": { "nest_created_at": "2024-09-11T20:08:28.727Z", - "nest_updated_at": "2024-09-11T20:08:28.727Z", + "nest_updated_at": "2024-09-13T03:53:42.053Z", "node_id": "I_kwDOEAWEP85NKI8Q", "title": "Replace Vue CLI with Vite", "body": "**Describe what problem your feature request solves**\r\n[Vue CLI](https://cli.vuejs.org/) is the official webpack-based toolchain for Vue. 
It is now in maintenance mode and ideally should be replaced with [Vite](https://vitejs.dev/) which will provide superior developer experience in most cases.\r\n\r\n**Describe the solution you'd like**\r\nReplace Vue CLI with Vite\r\n\r\n**Additional context**\r\nhttps://vuejs.org/guide/scaling-up/tooling.html#vue-cli\r\n", + "summary": "Replace Vue CLI with Vite to enhance the developer experience and leverage Vite's modern features. \n\n1. Assess the current project setup using Vue CLI, including configuration files, scripts, and dependencies.\n2. Install Vite by running `npm install vite --save-dev`.\n3. Update project structure to align with Vite's requirements. Create a `vite.config.js` file tailored for the project setup.\n4. Migrate existing Vue CLI configurations and plugins to Vite equivalents. This may involve replacing Webpack-specific code with Vite-compatible configurations.\n5. Adjust npm scripts in `package.json` to use Vite commands, replacing any Vue CLI commands.\n6. Test the application thoroughly to ensure all functionalities work post-migration.\n7. Document the migration process and update any relevant project documentation. \n\nRefer to the [Vite documentation](https://vitejs.dev/) for detailed guidance on configuration and setup.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/443", @@ -9633,15 +9962,12 @@ "comments_count": 0, "closed_at": null, "created_at": "2022-07-05T16:15:04Z", - "updated_at": "2024-08-23T09:49:54Z", + "updated_at": "2024-09-12T07:59:47Z", "author": 43, "repository": 502, - "assignees": [ - 142 - ], + "assignees": [], "labels": [ - 71, - 73 + 71 ] } }, @@ -9650,10 +9976,11 @@ "pk": 338, "fields": { "nest_created_at": "2024-09-11T20:08:30.910Z", - "nest_updated_at": "2024-09-11T20:08:30.910Z", + "nest_updated_at": "2024-09-13T03:53:43.063Z", "node_id": "I_kwDOEAWEP85NKNAl", "title": "Migrate to Vue 3", - "body": "**Describe what problem your feature request solves**\r\nVue 3 is the current, latest major version of Vue. It contains new features that are not present in Vue 2 (most notably Composition API), and also contains breaking changes that makes it incompatible with Vue 2.\r\nIn general, Vue 3 provides smaller bundle sizes, better performance, better scalability, and better TypeScript / IDE support. If starting a new project today, Vue 3 is the recommended choice.\r\n\r\n**Describe the solution you'd like**\r\n[Migrate to Vue 3](https://v3-migration.vuejs.org/)\r\nAlso make sure that vue-router is also updated (to something like ^4.1.0)\r\n\r\n**Additional context**\r\nhttps://vuejs.org/about/faq.html#what-s-the-difference-between-vue-2-and-vue-3\r\nPresent version of Vue is 2.6.11\r\n\r\nI18N will need to change: use the Vue I18n v9 [documentation](https://vue-i18n.intlify.dev/) instead.\r\n", + "body": "**Describe what problem your feature request solves**\r\nVue 3 is the current, latest major version of Vue. It contains new features that are not present in Vue 2 (most notably Composition API), and also contains breaking changes that makes it incompatible with Vue 2.\r\nIn general, Vue 3 provides smaller bundle sizes, better performance, better scalability, and better TypeScript / IDE support. 
If starting a new project today, Vue 3 is the recommended choice.\r\n\r\n**Describe the solution you'd like**\r\n[Migrate to Vue 3](https://v3-migration.vuejs.org/)\r\nAlso make sure that vue-router is also updated (to something like ^4.1.0)\r\n\r\n**Additional context**\r\nhttps://vuejs.org/about/faq.html#what-s-the-difference-between-vue-2-and-vue-3\r\nPresent version of Vue is 2.6.11\r\n\r\npackage `vue-template-compiler` will need to be updated\r\n\r\nI18N will need to change: use the Vue I18n v9 [documentation](https://vue-i18n.intlify.dev/) instead.\r\n", + "summary": "**Migrate to Vue 3**\n\n1. Upgrade the project to Vue 3 from the current version 2.6.11.\n2. Implement the Composition API for better scalability and performance.\n3. Update `vue-router` to version ^4.1.0 to ensure compatibility with Vue 3.\n4. Replace `vue-template-compiler` with the appropriate version that supports Vue 3.\n5. Transition I18N setup to use Vue I18n v9 as per the updated documentation.\n\n**First Steps:**\n\n- Review the [Vue 3 Migration Guide](https://v3-migration.vuejs.org/) to understand breaking changes and new features.\n- Assess current project structure and identify components that need refactoring to utilize the Composition API.\n- Update dependencies in `package.json`, specifically `vue`, `vue-router`, and `vue-template-compiler`.\n- Test the application extensively to identify any issues arising from the migration.\n- Update I18N implementation according to Vue I18n v9 documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/444", @@ -9664,12 +9991,10 @@ "comments_count": 1, "closed_at": null, "created_at": "2022-07-05T16:32:39Z", - "updated_at": "2024-08-23T09:49:54Z", + "updated_at": "2024-09-12T07:50:54Z", "author": 43, "repository": 502, - "assignees": [ - 142 - ], + "assignees": [], "labels": [ 71, 73 @@ -9685,6 +10010,7 @@ "node_id": "I_kwDOEAWEP85NhyZK", "title": "Provide text Search and Replace", "body": "**Describe what problem your feature request solves**\r\nSometimes there is a lot of renaming to be done on a threat project, for example if the namespace changes\r\n\r\n**Describe the solution you'd like**\r\nProvide text 'Search and Replace' functionality to the Text Search feature\r\n\r\n**Additional context**\r\nUser feedback \"_a mass replace functionality in addition to search. Because renamings happen_\"\r\n", + "summary": "Implement a 'Search and Replace' feature for text in the project.\n\n**Problem:** Address the need for mass renaming, particularly when namespaces change, by facilitating an efficient way to perform bulk edits across the codebase.\n\n**Solution:** Introduce a 'Search and Replace' functionality that enhances the existing text search feature. This should allow users to specify a search term and provide a replacement term, applying changes across the entire project or within selected files.\n\n**Technical Details:**\n- Extend the current search functionality to include a replace option.\n- Ensure the feature supports regular expressions for flexible search patterns.\n- Implement safeguards to prevent unintended replacements, such as a preview of changes before committing.\n- Consider integrating version control features to allow users to revert changes easily.\n\n**First Steps:**\n1. Analyze the current text search implementation to determine how to best integrate the search and replace functionality.\n2. Define the user interface changes needed for the 'Replace' option.\n3. 
Develop the core logic for replacing text, including handling edge cases like overlapping matches.\n4. Create unit tests to validate the search and replace functionality.\n5. Gather user feedback on the implementation to refine the feature further.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/454", @@ -9713,6 +10039,7 @@ "node_id": "I_kwDOEAWEP85NiD1X", "title": "View multiple diagrams", "body": "**Describe what problem your feature request solves**\r\nSome projects have more than one diagram, but only one can be viewed at any one time\r\n\r\n**Describe the solution you'd like**\r\nprovide ability to view multiple diagrams at the same time\r\n\r\n**Additional context**\r\nUser request \"_Can I open multiple diagrams in their own windows so that I can see them side by side? We have up to 4 diagrams_\"\r\n", + "summary": "Implement functionality to view multiple diagrams simultaneously. Address the limitation where only one diagram is viewable at a time, which hinders users managing projects with multiple diagrams.\n\n1. **Analyze Current Diagram Rendering**: Review the existing code for how diagrams are rendered and displayed. Identify the rendering engine or component responsible for displaying a single diagram.\n\n2. **Design Multi-Window Interface**: Create a user interface that allows users to open multiple windows or tabs for diagrams. Ensure that the interface maintains usability and clarity.\n\n3. **Modify State Management**: Adjust the state management to handle multiple diagram instances. This includes ensuring that each diagram can be independently manipulated without affecting others.\n\n4. **Implement Window Management**: Develop the logic for opening, closing, and arranging multiple windows. Consider using a library or framework that facilitates window management if needed.\n\n5. **Test Feature**: Conduct thorough testing with various numbers of diagrams (up to 4 as mentioned) to ensure stability and performance.\n\n6. **Gather User Feedback**: After implementation, collect feedback from users to refine the feature further.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/threat-dragon/issues/458", @@ -9728,8 +10055,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 77 + 77, + 71 ] } }, @@ -9742,6 +10069,7 @@ "node_id": "I_kwDOEAWEP85NiFMr", "title": "add moveable labels for data flows and trust boundaries", "body": "**Describe what problem your feature request solves**\r\nThe labels for data flows and trust boundaries can not be moved, and may conflict on the diagram\r\n\r\n**Describe the solution you'd like**\r\nAbility to move the data flow and trust boundary labels, as they may coincide\r\n\r\n**Additional context**\r\nUser feedback \"_Labels for flows and boundaries are not movable, for example it can be not clear whether the label applies to a dataflow or a boundary_\"\r\n", + "summary": "Implement moveable labels for data flows and trust boundaries to enhance diagram clarity. \n\n**Problem to solve:**\nCurrent labels for data flows and trust boundaries are static, leading to potential overlap and confusion in diagrams.\n\n**Proposed solution:**\n1. Develop functionality to enable users to click and drag labels for data flows and trust boundaries to desired positions.\n2. 
Ensure that labels can be clearly associated with their respective elements to avoid ambiguity.\n\n**Technical details:**\n- Modify the diagram rendering logic to include a draggable property for labels.\n- Update the event handling to capture mouse events for label movement.\n- Implement collision detection to prevent labels from overlapping with other elements unless explicitly allowed.\n- Maintain the current association of labels to their respective data flows or trust boundaries during movement.\n\n**First steps:**\n1. Review the current implementation of the label rendering system.\n2. Identify the event listeners associated with label interactions.\n3. Begin prototyping the drag-and-drop functionality for labels.\n4. Test the movement feature for usability and clarity in various diagram scenarios.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/459", @@ -9757,8 +10085,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 83 + 83, + 71 ] } }, @@ -9771,6 +10099,7 @@ "node_id": "I_kwDOEAWEP85NiNTX", "title": "PDF report sections and ToC", "body": "**Describe what problem your feature request solves**\r\nIt can be difficult to find the diagrams in reports with multiple diagrams\r\n\r\n**Describe the solution you'd like**\r\nProvide sections and a Table of Contents for the diagrams in the PDF report\r\n\r\n**Additional context**\r\nUser feedback \"_Would be great to have PDF create sections for each diagram. Our models use multiple diagrams (up to 4) and it takes a while to manually find diagrams in a PDF report_\"\r\n", + "summary": "Implement a feature to add sections and a Table of Contents (ToC) for diagrams in PDF reports. This will enhance navigation in reports containing multiple diagrams, making it easier for users to locate specific diagrams quickly.\n\n**Technical Steps to Address This:**\n1. **Analyze Current PDF Generation Process**: Review the existing functionality used to generate PDF reports and how diagrams are currently included.\n2. **Define Data Structure**: Create a structure to store metadata for each diagram, including titles and page numbers, which will be used to generate the ToC.\n3. **Modify PDF Layout**: Update the PDF layout code to include sections for each diagram, ensuring each section is clearly labeled.\n4. **Generate Table of Contents**: Implement a method to automatically compile the ToC from the defined metadata, listing all diagrams with corresponding links to their sections.\n5. **Testing**: Conduct thorough testing with reports containing varying numbers of diagrams to ensure the ToC and sections function correctly and improve user navigation.\n6. 
**Gather User Feedback**: After implementation, solicit feedback from users to refine and enhance the feature.\n\nBy following these steps, improve the usability of PDF reports containing multiple diagrams and streamline the user experience.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/461", @@ -9786,9 +10115,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 73, - 84 + 84, + 71 ] } }, @@ -9801,6 +10130,7 @@ "node_id": "I_kwDOEAWEP85NiQpG", "title": "Warn when dataflows dangle", "body": "**Describe what problem your feature request solves**\r\nA dataflow needs to be connected at both ends, otherwise it does not make sense\r\n\r\n**Describe the solution you'd like**\r\nThe dataflow colour or shape should signify that the dataflow is not connected at both ends\r\n\r\n**Additional context**\r\nUser request \"_There should be visual identification for dataflows that are not connected to objects on one or more sides (MS TMT used to list them)_\"\r\n", + "summary": "Implement a warning system for dangling dataflows. Ensure that each dataflow is visually identifiable when it is not connected at both ends. Modify the color or shape of the dataflow to indicate its unconnected status. \n\n### Technical Details:\n1. **Identify Dataflow Status**: Create a function that checks the connection status of each dataflow. This function should verify if both ends are linked to objects.\n2. **Define Visual Indicators**: Decide on specific colors or shapes to represent unconnected dataflows. For instance, use red color or dashed lines for unconnected flows.\n3. **Integrate into UI**: Modify the user interface to apply the visual changes dynamically as dataflows are created or modified.\n4. **User Feedback**: Implement a tooltip or message that provides additional information when a user hovers over a dangling dataflow.\n\n### First Steps:\n1. Review the existing dataflow structure and connection logic.\n2. Develop the function to check for connection status.\n3. Prototype the visual changes and gather user feedback.\n4. 
Test the implementation across various scenarios to ensure reliability.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/463", @@ -9829,6 +10159,7 @@ "node_id": "I_kwDOEAWEP85Pic6-", "title": "Provide timeouts / reporting for login failures", "body": "**Describe what problem your feature request solves**\r\nIf there is a problem logging in, specifically the 'Login with Github', then either a blank dashboard page is presented to the user, or a constantly spinning wait\r\n\r\n**Describe the solution you'd like**\r\nIf the login fails, present the user with an error message and return to the homepage\r\n\r\n**Additional context**\r\nWhen canceling login to github:\r\n\"cancel-login-to-github\"\r\n\r\n\r\nLog of cancelled login to Github:\r\n\r\n```\r\ndebug: controllers/auth.js: API login request: [object Object] {\"service\":\"threat-dragon\",\"timestamp\":\"2022-08-10 11:10:02\"}\r\ndebug: controllers/auth.js: API oauthReturn request: [object Object] {\"service\":\"threat-dragon\",\"timestamp\":\"2022-08-10 11:10:04\"}\r\ndebug: controllers/auth.js: API completeLogin request: [object Object] {\"service\":\"threat-dragon\",\"timestamp\":\"2022-08-10 11:10:04\"}\r\nerror: controllers/auth.js: {\"service\":\"threat-dragon\",\"timestamp\":\"2022-08-10 11:10:05\"}\r\nerror: Requires authentication {\"body\":{\"documentation_url\":\"https://docs.github.com/rest/reference/users#get-the-authenticated-user\",\"message\":\"Requires authentication\"},\"headers\":{\"access-control-allow-origin\":\"*\",\"access-control-expose-headers\":\"ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, X-GitHub-Request-Id, Deprecation, Sunset\",\"connection\":\"close\",\"content-length\":\"131\",\"content-security-policy\":\"default-src 'none'\",\"content-type\":\"application/json; charset=utf-8\",\"date\":\"Wed, 10 Aug 2022 10:10:05 GMT\",\"referrer-policy\":\"origin-when-cross-origin, strict-origin-when-cross-origin\",\"server\":\"GitHub.com\",\"strict-transport-security\":\"max-age=31536000; includeSubdomains; preload\",\"vary\":\"Accept-Encoding, Accept, X-Requested-With\",\"x-content-type-options\":\"nosniff\",\"x-frame-options\":\"deny\",\"x-github-media-type\":\"github.v3; format=json\",\"x-github-request-id\":\"DCD8:9FDC:C3D6C6:CE492A:62F383FD\",\"x-ratelimit-limit\":\"60\",\"x-ratelimit-remaining\":\"58\",\"x-ratelimit-reset\":\"1660129699\",\"x-ratelimit-resource\":\"core\",\"x-ratelimit-used\":\"2\",\"x-xss-protection\":\"0\"},\"service\":\"threat-dragon\",\"statusCode\":401,\"timestamp\":\"2022-08-10 11:10:05\"}\r\nerror: undefined {\"service\":\"threat-dragon\",\"timestamp\":\"2022-08-10 11:10:05\"}\r\ndebug: controllers/auth.js: Returning error to client: {\"status\":500,\"message\":\"Internal Server Error\",\"details\":\"Internal Server Error\"} {\"service\":\"threat-dragon\",\"timestamp\":\"2022-08-10 11:10:05\"}\r\n```\r\nWhen protocol mismatch occurs (HTTPS certs not valid):\r\n\"login-incompatible-protocol\"\r\n\r\n", + "summary": "Implement timeouts and error reporting for login failures.\n\n1. Identify the issue: Users encounter a blank dashboard or a spinning wait icon when the \"Login with GitHub\" feature fails.\n\n2. Propose a solution: Upon a login failure, display an error message to the user and redirect them back to the homepage.\n\n3. 
Review existing error logs for understanding:\n - Analyze the log entries related to login requests to identify failure points. For instance, check for HTTP status codes like 401 (authentication required) or 500 (internal server error) as shown in the example logs.\n\n4. Design the error handling process:\n - Create a timeout mechanism for the login process. If the login attempt exceeds a specified duration without a response, trigger an error state.\n - Implement a user-friendly error message that specifies the nature of the failure (e.g., \"Login Failed: Please check your credentials or try again later.\").\n\n5. Redirect users: Ensure that after displaying the error message, the application navigates users back to the homepage to provide a seamless experience.\n\n6. First steps to tackle the problem:\n - Review the authentication controller code to locate where the login requests are processed.\n - Implement a timeout function that cancels the login attempt if it takes too long.\n - Add error handling logic to manage different types of errors (e.g., authentication errors, protocol mismatches).\n - Test the implementation thoroughly to ensure that users receive appropriate feedback on login failures.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/481", @@ -9857,6 +10188,7 @@ "node_id": "I_kwDOEAWEP85QK_81", "title": "console.log redirected to log file", "body": "**Describe what problem your feature request solves**\r\nWe have some places where the console is used to log messages, which do not appear in the log file\r\n\r\n**Describe the solution you'd like**\r\nIt would be good to make sure that all console.log messages also appear in the log file. This has been done for electron logs, so would be good for server and vue logs\r\n\r\n**Additional context**\r\nAt the same time ensure that no tokens or PII is being logged\r\n", + "summary": "Implement a solution to redirect `console.log` messages to a log file. Ensure all logging mechanisms, including those used in server and Vue environments, capture console outputs.\n\n1. **Investigate Existing Logging Mechanisms**: Review how logging is currently implemented for Electron to identify potential methods for redirecting console output to files.\n\n2. **Modify Console Methods**: Override `console.log`, `console.warn`, and other relevant console methods to include file writing functionality. Utilize a logging library or Node.js' `fs` module to append logs to a designated log file.\n\n3. **Implement Filtering for Sensitive Data**: Integrate a mechanism to filter out any tokens or Personally Identifiable Information (PII) from the logs before writing them to the log file. Use regular expressions or a predefined list of sensitive keywords.\n\n4. **Test the Implementation**: Create unit tests to ensure that all console messages are captured in the log file and that no sensitive data is logged.\n\n5. 
**Document the Changes**: Update project documentation to reflect the new logging behavior, including examples of how messages will appear in the log file and any configurations needed for sensitive data filtering.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/493", @@ -9872,8 +10204,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 85 + 85, + 71 ] } }, @@ -9886,6 +10218,7 @@ "node_id": "I_kwDOEAWEP85bOYiH", "title": "Update jekyll versions for docs ", "body": "**Describe what problem your feature request solves**\r\nThe ruby gem files reference out of date versions\r\n\r\n**Describe the solution you'd like**\r\nupdate docs gem files for up to date versions\r\n\r\n**Additional context**\r\n\r\n", + "summary": "Update Jekyll versions for documentation. \n\nIdentify outdated Ruby gem files in the documentation. Replace current references with the latest stable versions of Jekyll and its dependencies. \n\nFirst steps: \n1. Check the current Jekyll version in the Gemfile and Gemfile.lock files.\n2. Visit the official Jekyll repository or RubyGems to find the latest versions.\n3. Update the Gemfile with the new version numbers.\n4. Run `bundle update` to ensure all dependencies are resolved.\n5. Test the documentation site locally to confirm that updates do not introduce any issues. \n6. Commit the changes and submit a pull request for review.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/584", @@ -9901,9 +10234,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 85, - 86 + 86, + 71 ] } }, @@ -9916,6 +10249,7 @@ "node_id": "I_kwDOEAWEP85eq-qF", "title": "Support for more granular threats using threat trees", "body": "**Describe what problem your feature request solves**\r\nWhile documenting specific threats, it would be helpful if we could select more granular threats in the Edit Threat modal. For example, LINDDUN and STRIDE both have threat trees corresponding to each threat. The frameworks refer to these trees in their mitigation strategies section, so having this feature would be useful to quickly find the corresponding mitigations. More context is provided below.\r\n\r\n**Describe the solution you'd like**\r\nCurrently we can select the type of threat in the Edit Threat modal. This type corresponds to the root node of a threat tree. Perhaps there could be an additional drop-down menu below this with options for each of the child nodes in the corresponding threat trees?\r\n\r\n**Additional context**\r\nFor example, the Linkability threat in LINDDUN has the threat tree shown below. On selecting Linkability in the Edit Threat modal, another dropdown below could have options corresponding to the child nodes in this tree. \r\n\r\n\"diagram\r\n\r\nSource: [Linkability Threat Trees](https://www.linddun.org/linkability)\r\n\r\nHaving this information could then allow us to find the corresponding mitigation strategies from a table like this one:\r\n\r\n\"linddun\r\n\r\nSource: [LINDDUN Mitigation Strategies](https://www.linddun.org/mitigation-strategies-and-solutions)\r\n\r\nThank you!", + "summary": "Implement support for granular threats using threat trees in the Edit Threat modal. Enable selection of child nodes corresponding to threat trees like LINDDUN and STRIDE. \n\n1. Modify the Edit Threat modal to include a new dropdown menu beneath the existing threat type selection.\n2. Populate this dropdown with options for each child node from the selected threat tree.\n3. 
Ensure that selecting a child node allows users to easily reference associated mitigation strategies.\n4. Review the Linkability threat tree from LINDDUN as a primary example for implementation, ensuring the structure aligns with existing frameworks.\n5. Test the feature to confirm that it accurately displays child nodes and links to relevant mitigation strategies.\n\nThese steps will enhance usability and speed up the threat documentation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/607", @@ -9933,8 +10267,8 @@ 143 ], "labels": [ - 71, - 77 + 77, + 71 ] } }, @@ -9947,6 +10281,7 @@ "node_id": "I_kwDOEAWEP85gb7yz", "title": "Allow user to select multiple threat types", "body": "**Describe what problem your feature request solves**\r\nAt the moment when selecting a threat type, I can only select one of the available options. For example, I can indicate a threat covers Confidentiality **or** Integrity **or** Availability, but cannot indicate it affects multiple of them. This creates a problem when discussing threats that legitimately affect multiple areas—for instance, a service crash that creates Availability issues and may disclose information while crashing that threatens Confidentiality.\r\n\r\n**Describe the solution you'd like**\r\nThreat types should be multi-select, perhaps with check-boxes or the Option-key-to-select-multiple solution. This enables users to select one or multiple depending on the context of the threat.", + "summary": "Implement multi-select functionality for threat types. \n\n**Problem Addressed**: Currently, users can only select a single threat type (Confidentiality, Integrity, Availability), which limits the ability to accurately represent threats affecting multiple areas, such as a service crash impacting both Availability and Confidentiality.\n\n**Proposed Solution**: Introduce check-boxes or an Option-key mechanism to allow users to select multiple threat types simultaneously. This will improve threat categorization and facilitate better discussions around multi-faceted threats.\n\n**First Steps to Tackle the Problem**:\n1. Analyze the current UI component handling threat type selection.\n2. Modify the selection component to support multiple selections (consider using a library or framework that handles check-boxes).\n3. Update the backend logic to accept and process multiple threat types.\n4. Perform testing to ensure that both selection and data handling work as expected.\n5. Collect user feedback on the new feature for further refinements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/620", @@ -9962,9 +10297,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 72, - 73 + 73, + 71 ] } }, @@ -9977,6 +10312,7 @@ "node_id": "I_kwDOEAWEP85hGL0h", "title": "Flatpak/flathub support", "body": "**Describe what problem your feature request solves**\r\nA [Flatpak](https://flatpak.org/) install would be a great alternative to Snaps and Appimages, since this allows most users to download threat-dragon straight from their software store. Would people be open for this? I could look into it myself but I want to see if there is an appetite for it.\r\n\r\nFlatpack [instructions](https://github.com/flatpak/flatpak) and [flathub](https://flathub.org/)\r\nalong with [electron and flatpak](https://docs.flatpak.org/en/latest/electron.html)", + "summary": "Implement Flatpak support for Threat Dragon. 
\n\nEvaluate the benefits of offering a Flatpak installation option as an alternative to Snaps and AppImages. Focus on the convenience for users in downloading Threat Dragon directly from their software store.\n\nResearch Flatpak and Flathub documentation thoroughly. Review the Flatpak [instructions](https://github.com/flatpak/flatpak) and explore Flathub [guidelines](https://flathub.org/). \n\nInvestigate how to package the application using Electron within Flatpak. Consult the [Electron and Flatpak documentation](https://docs.flatpak.org/en/latest/electron.html) for specific requirements.\n\nBegin by:\n1. Setting up a Flatpak development environment.\n2. Creating a Flatpak manifest file for the Threat Dragon application.\n3. Testing the build process locally to ensure functionality.\n4. Engaging with the community to gauge interest in this feature.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/626", @@ -9995,10 +10331,10 @@ 146 ], "labels": [ - 71, - 78, 80, - 82 + 82, + 78, + 71 ] } }, @@ -10011,6 +10347,7 @@ "node_id": "I_kwDOEAWEP85jHNdW", "title": "Use RAAML for threat models", "body": "**Describe what problem your feature request solves**\r\n\r\n\r\nThe Risk Analysis and Assessment Modeling Language (RAAML) specification is a sysml compliant format that would allow integration with other modeling capabilities such as simulation. It is also a component UAFML. \r\n\r\n**Describe the solution you'd like**\r\n\r\n\r\nImplement modeling capabilities to support RAAML either as default or as content import/export.\r\n\r\n**Additional context**\r\n\r\n\r\nhttps://www.omg.org/spec/RAAML/1.0/Beta2/About-RAAML\r\nhttps://emfjson.github.io/projects/ecorejs/latest/", + "summary": "Implement RAAML support for threat models. Utilize the Risk Analysis and Assessment Modeling Language (RAAML) specification to facilitate integration with simulation tools and enhance modeling capabilities. Ensure compatibility with UAFML and SysML standards.\n\n**First Steps:**\n1. Review the RAAML specification at the provided OMG link to understand its structure and requirements.\n2. Analyze current modeling capabilities and identify integration points for RAAML.\n3. Design a framework for importing and exporting RAAML content.\n4. Develop a prototype that demonstrates basic RAAML modeling capabilities.\n5. Test the prototype with existing threat models to validate functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/639", @@ -10026,9 +10363,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 73, - 77 + 77, + 71 ] } }, @@ -10041,6 +10378,7 @@ "node_id": "I_kwDOEAWEP85jNXUB", "title": "Make the API OSLC compliant", "body": "**Describe what problem your feature request solves**\r\n\r\n\r\nModeling tools have their own interoperability specification called Open Services for Lifecycle Collaboration (OSLC). 
It would be nice if Threat Dragon could provide OSLC compatibility to facilitate ALM / PLM integration.\r\n\r\n**Describe the solution you'd like**\r\n\r\n\r\nProvide OSLC compatible API and documentation with OpenAPI.\r\n\r\n**Additional context**\r\n\r\n\r\nhttps://open-services.net/\r\nhttps://www.ibm.com/docs/en/elms/elm/6.0.4?topic=integrating-oslc-integrations\r\nhttps://docs.oasis-open-projects.org/oslc-op/qm/v2.1/os/quality-management-spec.html\r\n", + "summary": "Implement OSLC Compliance for API\n\n- Address the need for interoperability among modeling tools by making the Threat Dragon API OSLC compliant.\n- Ensure compatibility with Open Services for Lifecycle Collaboration (OSLC) to enable integration with Application Lifecycle Management (ALM) and Product Lifecycle Management (PLM) systems.\n- Develop an OSLC-compatible API that adheres to the specifications outlined on the OSLC website and related documentation.\n\nFirst Steps:\n1. Review the OSLC specifications and guidelines available at https://open-services.net/ to understand the requirements for compliance.\n2. Analyze the current Threat Dragon API structure and identify necessary modifications to align with OSLC standards.\n3. Create a design plan for the new OSLC-compatible API, including endpoints and data formats.\n4. Implement the API changes, ensuring to include detailed documentation using OpenAPI for ease of use and integration by third parties.\n5. Test the new API for compliance with OSLC standards and validate functionality with ALM/PLM tools.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/641", @@ -10056,9 +10394,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 73, - 87 + 87, + 71 ] } }, @@ -10071,6 +10409,7 @@ "node_id": "I_kwDOEAWEP85jhf0_", "title": "Use the new File System Access API on Chromium browsers", "body": "**Describe what problem your feature request solves**\r\nThe actual way of handling local threat models could generate a lot of files because a new file is generated each time a user saves the working threat model.\r\n\r\n**Describe the solution you'd like**\r\nOn chromium based browsers the new File System Access API should be used (it's the one used in https://vscode.dev/)\r\n\r\n**Additional context**\r\nBlog presenting the API: https://developer.chrome.com/articles/file-system-access/ \r\n", + "summary": "Implement the File System Access API for Chromium browsers to improve local threat model file handling. \n\n**Problem to Solve:**\nCurrent file management generates excessive files by creating a new file each time a user saves a threat model, leading to clutter and inefficiency.\n\n**Proposed Solution:**\nUtilize the File System Access API to allow direct access and manipulation of files in the user's local file system. This will streamline the saving process, enabling users to overwrite existing files or manage their local files more effectively.\n\n**Technical Steps to Tackle the Problem:**\n1. Research the File System Access API and understand its capabilities and limitations.\n2. Update the codebase to replace current file handling methods with the File System Access API.\n3. Implement a file picker UI to allow users to select existing files to overwrite or choose locations for new files.\n4. Test the integration across different Chromium-based browsers to ensure compatibility and performance.\n5. 
Document the changes and provide user guidance on the new file handling process.\n\n**Resources:**\nRefer to the detailed blog on the File System Access API for implementation guidelines: https://developer.chrome.com/articles/file-system-access/", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/643", @@ -10088,8 +10427,8 @@ 43 ], "labels": [ - 71, - 80 + 80, + 71 ] } }, @@ -10102,6 +10441,7 @@ "node_id": "I_kwDOEAWEP85pxCwC", "title": "Provide demo versions of 1.x and 2.x to combined site", "body": "**Describe what problem your feature request solves**:\r\nIt would be good to have demos of both versions 1.x and 2.x on a combined site alongside pytm\r\n\r\n**Describe the solution you'd like**:\r\n@izar has offered to provide access to [threatmodeling.dev](http://threatmodeling.dev/) site so that demos of pytm and Threat Dragon can be alongside each other\r\n\r\n**Additional context**:\r\nControl of the site would also be with OWASP along with @izar, and this will guarantee long term continuity\r\n", + "summary": "Create demo versions of both 1.x and 2.x on a combined site with pytm. Ensure that the demos are accessible through the threatmodeling.dev platform. \n\n1. Collaborate with @izar to set up the environment on the threatmodeling.dev site.\n2. Establish a version control process to manage updates for both 1.x and 2.x demos.\n3. Ensure that OWASP maintains control over the site for long-term support and continuity, involving @izar in the management.\n4. Develop a user-friendly interface to allow easy navigation between the demos of pytm and Threat Dragon.\n5. Test the demos thoroughly to ensure they function correctly and are user-friendly before launch. \n\nBy implementing these steps, you will provide users with an integrated experience of the different versions available.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/684", @@ -10117,9 +10457,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, + 87, 79, - 87 + 71 ] } }, @@ -10132,6 +10472,7 @@ "node_id": "I_kwDOEAWEP85qAOP3", "title": "Integration with OpenAI API", "body": "**Describe what problem your feature request solves**:\r\nIt would be good to investigate Threat Dragon integration with OpenAI API\r\n\r\n**Describe the solution you'd like**:\r\nDescription of what it would take and roughly how long\r\n\r\n**Additional context**:\r\n@cvlucian asks: _\"research into Threat Dragon being integrated with OpenAI API? Endless possibilities probably to populate threat models models based on technical spec docs or simple input, AI assessment of risks etc.\"_\r\n", + "summary": "Investigate integration of Threat Dragon with OpenAI API. Focus on leveraging OpenAI's capabilities to enhance threat modeling processes by populating models using technical specifications or simple inputs. Assess potential for AI-driven risk evaluations.\n\nOutline the required steps for integration:\n1. Research OpenAI API documentation to understand its functionalities and limitations.\n2. Define use cases for Threat Dragon, such as:\n - Automatically generating threat models from input data.\n - Conducting AI-based assessments of identified risks.\n3. Evaluate data inputs and outputs necessary for seamless integration.\n4. Create a prototype to test API interactions with Threat Dragon.\n5. 
Estimate implementation time based on complexity of integration tasks.\n\nCollaborate with @cvlucian and other contributors to refine use cases and gather additional insights on practical applications. Prioritize identifying any potential challenges related to data privacy and compliance when utilizing AI models for sensitive information.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/685", @@ -10160,6 +10501,7 @@ "node_id": "I_kwDOEAWEP85twZey", "title": "Enable threats to be reassigned and/or duplicated to other components", "body": "**Describe what problem your feature request solves**:\r\n\r\nOnce a threat is created, it is not possible to duplicate it or move it to other components. You need to manually repeat the whole process or delete a threat and add new one in different component.\r\n\r\n**Describe the solution you'd like**:\r\n\r\nIt would be really useful to be able to move threats from one component to another and to duplicate once created threat in other component. \r\n\r\n**Additional context**:\r\n\r\nThere are threats applicable to many components and it is time-consuming to create the same threat in every one of them. Also, once the component is created and threats are added to them, if you want to split it into two components you need to recreate every threat from the original one.\r\n", + "summary": "Implement functionality to enable reassignment and duplication of threats across components. \n\n**Problem Statement**: \nCurrently, threats can only be created manually for each component, leading to inefficiencies. Users must duplicate efforts by recreating threats or deleting and re-adding them to different components, which is time-consuming.\n\n**Proposed Solution**: \nIntroduce features that allow users to:\n1. Move a threat from one component to another.\n2. Duplicate an existing threat to another component directly.\n\n**Technical Considerations**:\n- Modify the threat management system to include options for 'Move' and 'Duplicate' actions.\n- Ensure that moving a threat updates all relevant relationships and dependencies associated with that threat.\n- Implement a user interface that allows easy selection of target components for duplication or reassignment.\n- Consider the impact on data integrity and ensure that threats remain consistent across components.\n\n**Next Steps**:\n1. Analyze the current database schema to understand how threats are linked to components.\n2. Design the user interface for moving and duplicating threats.\n3. Develop the backend logic for moving threats, ensuring proper handling of existing references.\n4. Test the new functionalities thoroughly to avoid data inconsistencies.\n5. 
Document the changes and provide user guidelines for utilizing the new features.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/716", @@ -10175,8 +10517,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 73 + 73, + 71 ] } }, @@ -10189,6 +10531,7 @@ "node_id": "I_kwDOEAWEP85uU5Sc", "title": "Upgrade to latest antv/x6 drawing package", "body": "**Describe what problem your feature request solves**:\r\nwe have a lot of niggles with the drawing package, and this may be because we are using an old version :\r\n\r\n```\r\n \"@antv/x6\": \"^1.32.7\",\r\n \"@antv/x6-vue-shape\": \"^1.4.0\",\r\n```\r\n\r\n**Describe the solution you'd like**:\r\nupgrade to latest version 2.x - note that this is a breaking change:\r\n\r\n```\r\n \"@antv/x6\": \"^2.13.1\",\r\n \"@antv/x6-vue-shape\": \"^2.1.1\",\r\n```\r\n\r\n**Additional context**:\r\n\r\n", + "summary": "Upgrade the antv/x6 drawing package to the latest version to resolve existing issues with the current implementation. The current dependencies are:\n\n```\n \"@antv/x6\": \"^1.32.7\",\n \"@antv/x6-vue-shape\": \"^1.4.0\",\n```\n\nTransition to the following versions, noting that this is a breaking change:\n\n```\n \"@antv/x6\": \"^2.13.1\",\n \"@antv/x6-vue-shape\": \"^2.1.1\",\n```\n\n### Steps to Tackle the Problem:\n\n1. **Review Release Notes**: Examine the release notes for versions 2.x to identify breaking changes and new features.\n \n2. **Update Dependencies**: Modify the `package.json` to reflect the new versions.\n\n3. **Run Tests**: Execute existing unit and integration tests to identify breaking changes or issues introduced by the upgrade.\n\n4. **Refactor Code**: Address any deprecated methods or properties within the codebase that may have changed in the new version.\n\n5. **Test Functionality**: Manually test the application to ensure all drawing functionalities work as expected with the upgraded packages.\n\n6. **Document Changes**: Update documentation to reflect any changes in usage due to the upgrade.\n\n7. **Monitor for Bugs**: After deployment, monitor the application for any new issues that may arise from the upgrade.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/722", @@ -10206,10 +10549,10 @@ 43 ], "labels": [ - 71, - 75, 83, - 87 + 75, + 87, + 71 ] } }, @@ -10222,6 +10565,7 @@ "node_id": "I_kwDOEAWEP85zLzuV", "title": "provide pipeline tests for utilities", "body": "**Describe what problem your feature request solves**:\r\nWe are not sure if the utilities still run OK, and these should be checked in one of the pipelines\r\n\r\n**Describe the solution you'd like**:\r\nCheck that the utilities still run in the CI pipeline\r\n\r\n**Additional context**:\r\nUtilities:\r\n* DragonExtractor\r\n* threat-mvp\r\n* TMT2TD\r\nand possibly [DragonGPT](https://github.com/LuizBoina/dragon-gpt)\r\n\r\n", + "summary": "Implement pipeline tests for utilities to ensure functionality. Verify that the following utilities execute correctly within the CI pipeline:\n\n1. DragonExtractor\n2. threat-mvp\n3. TMT2TD\n4. Potentially include DragonGPT\n\nBegin by creating test cases for each utility. Use unit tests to validate core functionalities and integration tests for interactions with other components. \n\nSteps to tackle the problem:\n1. Review the existing utility code to identify key functions and expected outputs.\n2. 
Develop unit tests for each utility, covering edge cases and typical use cases using a testing framework like pytest or unittest.\n3. Integrate these tests into the CI pipeline configuration, ensuring they run on each commit or pull request.\n4. Monitor test results and adjust as necessary to resolve any failures or issues.\n5. Document the testing process and update README files where appropriate to guide future contributions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/762", @@ -10237,8 +10581,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 79 + 79, + 71 ] } }, @@ -10251,6 +10595,7 @@ "node_id": "I_kwDOEAWEP8524btK", "title": "Clicking on data flow does not open it in properties", "body": "**Describe the bug**:\r\nClicking on an existing data flow does not set its properties into the properties section. A double click is needed, but it results in an addition of a new point on the data flow.\r\n\r\n**Expected behaviour**:\r\nA single click on the data flow makes its properties accessible in the properties section\r\n\r\n**Environment**:\r\n\r\n- Version: 2.1.1\r\n- Platform: Desktop App and Web App\r\n- OS: MAC OS / Windows / Linux\r\n- Browser: All\r\n\r\n**To Reproduce**:\r\n1. Add a data flow\r\n2. Select another component on the diagram\r\n3. Click on the data flow just created\r\n\r\n", + "summary": "Fix the issue where clicking on a data flow does not open its properties. Ensure that a single click on the data flow correctly populates the properties section instead of requiring a double click, which inadvertently adds a new point to the data flow.\n\n**Technical Details**:\n- Investigate the event handling for mouse clicks on data flow components within the properties section.\n- Check the logic that differentiates between single and double click actions.\n- Update the event listener for the data flow to ensure that it sets the properties on a single click.\n\n**First Steps**:\n1. Review the code responsible for handling clicks on data flow components.\n2. Identify the function that currently manages the properties section update.\n3. Modify the click event listener to trigger the properties update on a single click.\n4. Test the changes across different platforms (Mac OS, Windows, Linux) and in various browsers to ensure consistent behavior.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/785", @@ -10284,6 +10629,7 @@ "node_id": "I_kwDOEAWEP853bZp0", "title": "Add the full database of PLOT4ai threats to Threat Dragon", "body": "**Describe what problem your feature request solves**:\r\nPLOT4ai contains a database of 86 threats. Threat Dragon recently added support for the categories, similar to the other frameworks, but it would be nice if the threats under each category would become available from a drop-down menu as well.\r\nThe way threats are organized under the categories can be observed here: https://plot4.ai/library\r\n\r\n**Describe the solution you'd like**:\r\nWhen you add a threat (or edit one), you can already select the threat type, which corresponds to the plot4ai category.\r\nPLOT4ai has a [json file](https://github.com/PLOT4ai/plot4ai-library/blob/main/deck.json) available with all the threats; based on this json file and the selected type/category, the relevant threats could be loaded and used to populate a drop-down box called 'threat'. 
The user could then select the applicable threat.\r\nAdditionally, the information in the _explanation_ field could be used to prefill the Description-field in Threat Dragon and the information in the _recommendations_ field could be used to prefill the Mitigation field in Threat Dragon\r\n\r\n**Additional context**:\r\nPLOT4ai is in it's setup very similar to LINDDUN, which is also part of Threat Dragon. Adding this functionality would also open the possibility to add the threats from LINDDUN to threat dragon.\r\n\r\nThe additions of 'Development Phase' and 'Threat' to the threat dialog should only be visible when the diagram type is 'PLOT4ai', and possibly also for 'LINDDUN' diagram type.\r\n", + "summary": "Implement the full database of PLOT4ai threats in Threat Dragon. \n\n**Problem to solve**: Integrate 86 PLOT4ai threats into Threat Dragon, utilizing a user-friendly drop-down menu to enhance user experience when selecting threats.\n\n**Proposed solution**:\n1. Utilize the existing PLOT4ai [JSON file](https://github.com/PLOT4ai/plot4ai-library/blob/main/deck.json) to extract threat data.\n2. Modify the threat addition/editing interface to include a drop-down menu for selecting threats based on the chosen PLOT4ai category.\n3. Prefill the Description field in Threat Dragon with the information from the _explanation_ field of the JSON and the Mitigation field with data from the _recommendations_ field.\n4. Ensure that these new fields ('Development Phase' and 'Threat') appear only when the diagram type is set to 'PLOT4ai' or 'LINDDUN'.\n\n**First steps**:\n- Review the structure of the PLOT4ai JSON file to understand how threats are categorized.\n- Modify the front-end code of Threat Dragon to create a drop-down menu that dynamically populates with threats based on the selected category.\n- Implement the functionality to prefill the Description and Mitigation fields from the corresponding JSON data.\n- Test the integration to ensure that the drop-down menu and prefilled fields work correctly within the Threat Dragon interface.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/796", @@ -10301,9 +10647,9 @@ 150 ], "labels": [ - 71, 72, - 73 + 73, + 71 ] } }, @@ -10316,6 +10662,7 @@ "node_id": "I_kwDOEAWEP853bcDN", "title": "Add the full database of LINDDUN threats to Threat Dragon", "body": "**Describe what problem your feature request solves**:\r\nLINDDUN contains a database of 35 threats. Threat Dragon already has support for the categories, similar to the other frameworks, but it would be nice if the threats under each category would become available from a drop-down menu as well.\r\nThe way threats are organized under the categories can be observed here: https://downloads.linddun.org/linddun-go/default/v20230802/go.pdf\r\n\r\n**Describe the solution you'd like**:\r\nWhen you add a threat (or edit one), you can already select the threat type, which corresponds to the linddun category.\r\nIf the linddun cards were to be digitalized in json format - similar to plot4ai - then this could serve as a database; then, based on this json file and the selected type/category, the relevant threats could be loaded and used to populate a drop-down box called 'threat'. 
The user could then select the applicable threat.\r\nAdditionally, the rest of the information on the linddun cards could be used to prefill the Description-field in Threat Dragon.\r\n\r\n**Additional context**:\r\nLINDDUN is very similar to PLOT4ai, which was also recently added to Threat Dragon. Adding this functionality would also open the possibility to add the threats from PLOT4ai to threat dragon.", + "summary": "Implement full integration of the LINDDUN threat database into Threat Dragon. \n\n**Problem**: Enhance the usability of Threat Dragon by allowing users to select specific LINDDUN threats from a drop-down menu, improving threat identification in threat modeling.\n\n**Solution**: \n1. Digitalize the LINDDUN threat cards into a JSON format, similar to the PLOT4ai integration.\n2. Update the Threat Dragon interface to include a new drop-down menu for threats, linked to the category selected by the user.\n3. Populate the drop-down based on the JSON data, allowing users to select relevant threats based on their chosen category.\n4. Use additional details from the LINDDUN cards to automatically fill the Description field in Threat Dragon when a threat is selected.\n\n**First Steps**:\n- Gather the JSON format of the LINDDUN threat cards from the provided PDF link.\n- Develop a mapping of the threat categories to the existing Threat Dragon structure.\n- Implement the front-end changes to introduce a new drop-down menu for threat selection.\n- Test the integration with sample data to ensure smooth functionality and accurate threat population.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/797", @@ -10333,9 +10680,9 @@ 150 ], "labels": [ - 71, 72, - 73 + 73, + 71 ] } }, @@ -10344,10 +10691,11 @@ "pk": 361, "fields": { "nest_created_at": "2024-09-11T20:09:18.224Z", - "nest_updated_at": "2024-09-11T20:09:18.224Z", + "nest_updated_at": "2024-09-13T03:53:44.409Z", "node_id": "I_kwDOEAWEP854AXzc", "title": "Isolate Authentication and Repository", "body": "**Describe what problem your feature request solves**:\r\nIn reviewing #629, #426 and #1 I believe we need to decouple Authentication from the Repository, in cases where it is sensible.\r\n\r\n| Authentication | Repository | Status |\r\n| ------------- | ------------- |---------|\r\n| Github | Github Repo | Implemented |\r\n| Bitbucket | Bitbucket Repo | Implemented |\r\n| AWS IAM | S3 | #426 requests |\r\n| AWS IAM | AWS SQL | New |\r\n| Azure | Azure Blob | New |\r\n| Azure | Azure SQL | New |\r\n(Note - I am not proposing to build all of these combos!)\r\n\r\n\r\n**Describe the solution you'd like**:\r\n- [ ] Enable either intermediate screens between Provider selection and Repo selection for choice of Repository (where appropriate) or rely upon property files only.\r\n- [ ] Isolate the Provider and Repository coupling in Node - at present the authentication mechanism sets the repository, and they are 1-2-1.\r\n\r\n**Key Questions**:\r\n1. Is there a valid use case here to have a single Authentication Provider enable access to multiple types of repository? (I am not anticipating multiple repositories within a threat dragon instance)\r\n2. Do we want end users/modellers, not deployers, to select between repository options themselves or defer this to config in deployment?\r\n\r\nAs always, happy to be told this is beyond the scope or vision of the platform. 
", + "summary": "Isolate Authentication from Repository\n\n**Problem Solving**: Decouple Authentication from Repository to allow flexible integration with various repository types without direct coupling.\n\n**Proposed Solution**:\n1. Implement intermediate screens or use property files to allow users to choose the repository type after selecting the authentication provider.\n2. Refactor Node to eliminate the 1-2-1 relationship between authentication mechanisms and repositories, enabling a single authentication provider to access multiple repository types.\n\n**Key Questions to Address**:\n- Validate the use case for a single Authentication Provider accessing multiple repository types.\n- Determine if end users should choose repository options or if this decision should be made during deployment configuration.\n\n**First Steps**:\n1. Review existing code to identify where authentication and repository coupling occurs.\n2. Design a prototype for intermediate screens or property file management.\n3. Assess the implications of refactoring authentication and repository interaction on current implementations.\n4. Gather feedback on use cases from stakeholders to guide the development process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/805", @@ -10358,12 +10706,10 @@ "comments_count": 1, "closed_at": null, "created_at": "2023-11-28T00:24:25Z", - "updated_at": "2024-07-15T09:23:02Z", + "updated_at": "2024-09-12T07:58:30Z", "author": 141, "repository": 502, - "assignees": [ - 141 - ], + "assignees": [], "labels": [ 71, 74 @@ -10379,6 +10725,7 @@ "node_id": "I_kwDOEAWEP857QYJr", "title": "Desktop should show indication when loading", "body": "**Describe what problem your feature request solves**:\r\nThe desktop application can take a long time to load, and the spinner in App.vue does not show while loading\r\n\r\n**Describe the solution you'd like**:\r\nDesktop app shows indication of loading\r\n\r\n**Additional context**:\r\nsteps to reproduce:\r\n1. download the installer for a new version of Threat Dragon\r\n2. install the new version\r\n3. invoke the new version and wait up to 30s before getting a window appearing\r\n", + "summary": "Implement a loading indicator for the desktop application during startup. \n\n**Problem to Solve**: The desktop application currently lacks a visual indication when it is loading, leading to user confusion during the delay, which can last up to 30 seconds.\n\n**Solution**: \n1. Modify the App.vue component to display a loading spinner during the initialization phase of the application.\n2. Ensure the spinner is visible until the application fully loads and the main window appears.\n\n**Technical Steps**:\n1. Identify the lifecycle hooks in App.vue responsible for application startup.\n2. Introduce a reactive property (e.g., `isLoading`) to manage the loading state.\n3. Set `isLoading` to `true` at the beginning of the startup process and change it to `false` once the application is fully loaded.\n4. Update the template in App.vue to conditionally render the loading spinner based on the `isLoading` property.\n5. 
Test the implementation by installing a new version of Threat Dragon and verifying that the loading spinner appears during startup.\n\n**Next Steps**:\n- Review the current initialization code in App.vue.\n- Create a simple loading spinner component if none exists.\n- Implement the state management for loading indication.\n- Conduct user testing to ensure the loading experience is improved.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/threat-dragon/issues/823", @@ -10394,9 +10741,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 75, - 78 + 78, + 71 ] } }, @@ -10405,10 +10752,11 @@ "pk": 363, "fields": { "nest_created_at": "2024-09-11T20:09:22.413Z", - "nest_updated_at": "2024-09-11T20:09:22.413Z", + "nest_updated_at": "2024-09-13T03:53:45.749Z", "node_id": "I_kwDOEAWEP857ZVYL", "title": "PDF report generation froze the application", "body": "**Describe the bug**:\r\nGenerating a PDF report halted the desktop application\r\n\r\n**Expected behaviour**:\r\nApplication should not freeze\r\n\r\n**Environment**:\r\n\r\n- Version: 2.1.2\r\n- Platform: Desktop App\r\n- OS: Windows\r\n- Browser: electron\r\n\r\n**To Reproduce**:\r\n\r\n**Any additional context, screenshots, etc**:\r\n@mikkolehtisalo reports \"the Windows Server we run the tool on acted differently from ordinary desktop operating systems. I ended up moving the data to Excel/Word and finishing the report by hand.\"\r\n", + "summary": "Investigate the application freeze during PDF report generation. Ensure the application remains responsive while generating reports.\n\n**Technical Details**:\n- Version: 2.1.2\n- Platform: Desktop App\n- OS: Windows (specifically mentioned issues on Windows Server)\n- Browser: Electron\n\n**Steps to Reproduce**:\n1. Open the application.\n2. Initiate PDF report generation.\n\n**First Steps to Tackle the Problem**:\n1. Analyze the report generation code for synchronous operations that could block the main thread.\n2. Implement asynchronous processing for PDF generation to prevent UI freeze.\n3. Test the application on both regular desktop and Windows Server environments to replicate the issue.\n4. Utilize logging to capture any errors or performance metrics during report generation. \n5. Consider using a worker thread or a background process for handling PDF generation tasks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/826", @@ -10419,16 +10767,14 @@ "comments_count": 5, "closed_at": null, "created_at": "2024-01-08T11:16:52Z", - "updated_at": "2024-07-18T18:54:57Z", + "updated_at": "2024-09-12T07:56:10Z", "author": 43, "repository": 502, - "assignees": [ - 151 - ], + "assignees": [], "labels": [ + 88, 78, - 81, - 88 + 81 ] } }, @@ -10441,6 +10787,7 @@ "node_id": "I_kwDOEAWEP857ZbP2", "title": "Generic git support", "body": "**Describe what problem your feature request solves**:\r\nNot everyone uses github or bitbucket for git repo access, it would be good to provide for these organizations\r\n\r\n**Describe the solution you'd like**:\r\nProvide support for git, not specific to github or bitbucket\r\n\r\n**Additional context**:\r\n@mikkolehtisalo reports \"the application has capability to save into github. However configuring internal git that is not github would require code changes. More generic git support would be super.\"\r\n", + "summary": "Implement generic Git support to enable repository access beyond GitHub and Bitbucket. \n\n1. 
Assess current integration capabilities and identify hard-coded dependencies on GitHub and Bitbucket within the application.\n2. Develop a flexible configuration system that allows users to input their own Git repository details (e.g., URL, authentication).\n3. Refactor the codebase to utilize standard Git commands and APIs, ensuring compatibility with various Git hosting services.\n4. Test the implementation with multiple Git providers to validate functionality and stability.\n5. Update documentation to guide users on how to configure their own Git repositories.\n\nStart by analyzing the existing code that handles Git interactions and outline a plan for abstraction to accommodate different Git providers.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/828", @@ -10456,9 +10803,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 74, - 75 + 75, + 71 ] } }, @@ -10471,6 +10818,7 @@ "node_id": "I_kwDOEAWEP857rNmj", "title": "Revise data model of threat + mitigations", "body": "**Describe what problem your feature request solves**:\r\nMultiple mitigations for a threat are possible, and also multiple threats per mitigations\r\n\r\n**Describe the solution you'd like**:\r\nrevise data model of threat and mitigations to be more flexible\r\n\r\n**Additional context**:\r\nUser reports:\r\n\"_The data model for threat mitigations is in my opinion simply incorrrect. One threat may have many mitigations, which has different implementation status. At this moment the tool seems to allow tracking only one mitigation per threat._\r\n\r\n_Not being able to copy or move threats makes impossible to refactor the environment image later on. I would take even \"make a copy of this threat to there\" without any linking of contents to fix this. Without this I had to redraw the environment and do a lot of manual work to get the final report cleaned up._\"\r\n\r\n", + "summary": "Revise the data model for threats and mitigations to enhance flexibility. \n\n**Address the following issues**:\n1. Allow multiple mitigations for each threat, accommodating various implementation statuses.\n2. Enable multiple threats to be associated with each mitigation.\n\n**Implement a solution**:\n1. Redesign the database schema to support a many-to-many relationship between threats and mitigations, possibly using a join table.\n2. Update the application logic to handle the new relationships, ensuring that users can add, edit, and view multiple mitigations per threat and vice versa.\n\n**Consider additional features**:\n1. Implement functionality to copy or move threats, allowing users to refactor environment images without redundant manual work.\n\n**First steps**:\n1. Analyze the current data model structure and identify necessary changes.\n2. Create a prototype of the new data model and gather feedback from stakeholders.\n3. 
Begin implementing the database schema changes and update the application code accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/834", @@ -10486,8 +10834,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 75 + 75, + 71 ] } }, @@ -10500,6 +10848,7 @@ "node_id": "I_kwDOEAWEP859hs3c", "title": "winget support would be great", "body": "**Describe what problem your feature request solves**:\r\n\r\nIt would make the installation and upgrading the software on modern Windows very very easy.\r\n\r\n**Describe the solution you'd like**:\r\n\r\nI'd love to be able to say `winget install --id OWASP.ThreatDragon` or similar -- and of course subsequently `winget upgrade --id OWASP.ThreatDragon` -- in order to install and upgrade the software.\r\n\r\nPS: I think there is a way to integrate this with GitHub automation, but I need to dig that up again and will comment once I find it. That way if the assets in your releases follow a particular schema a PR will automatically go to microsoft/winget-pkgs.", + "summary": "Implement winget support to simplify software installation and upgrades on modern Windows systems. \n\n1. Enable users to install software using commands like `winget install --id OWASP.ThreatDragon` and upgrade with `winget upgrade --id OWASP.ThreatDragon`.\n2. Investigate integration with GitHub automation to streamline the process; ensure that release assets conform to a specified schema for automatic PR submissions to microsoft/winget-pkgs.\n\nFirst steps:\n- Research the winget CLI documentation to understand command usage and requirements.\n- Develop a manifest file for OWASP ThreatDragon that adheres to winget's specifications.\n- Test the installation and upgrade commands locally to verify functionality.\n- Explore GitHub Actions to automate the PR creation process for updates.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/848", @@ -10518,10 +10867,10 @@ 153 ], "labels": [ - 71, - 78, 80, - 81 + 81, + 78, + 71 ] } }, @@ -10534,6 +10883,7 @@ "node_id": "I_kwDOEAWEP8596XGk", "title": "Long term file format", "body": "**Describe what problem your feature request solves**:\r\nThe Threat Dragon file format / JSON schema uses two related but incompatible versions for 1.x and 2.x, and neither of these is a format other tools can use\r\n\r\n**Describe the solution you'd like**:\r\nThreat Dragon version 3.x should use a standard file format instead of the existing incompatible versions 1.x and versions 2.x formats\r\n[Open Threat Model](https://github.com/iriusrisk/OpenThreatModel) file format has been released and could be considered alongside CycloneDx\r\n\r\n**Additional context**:\r\n\r\n* [CycloneDx](https://cyclonedx.org/) is an [ECMA standard 424](https://ecma-international.org/publications-and-standards/standards/ecma-424/)\r\n* CycloneDx can provide the definition of the threat model file, as a TMBOM (Threat Model Bill of Materials)\r\n* There is also a [CyclonDX github issue](https://github.com/CycloneDX/specification/issues/462) for tracking this work\r\n* The TMBOM fields need to be defined in the [CycloneDx Property Taxonomy](https://github.com/CycloneDX/cyclonedx-property-taxonomy)\r\n* The closest CycloneDX BOM to a TMBOM is the [example of an HBOM](https://github.com/CycloneDX/bom-examples/blob/master/HBOM/PCIe-SATA-adapter-board/bom.json) (Hardware BOM)\r\n* An informal workstream (#[workstream-threat-model](https://cyclonedx.slack.com/archives/C073KMCQMUG)) has been 
set up on the OWASP CycloneDX Slack which meets regularly\r\n* [Agenda and minutes are available](https://docs.google.com/document/d/1OxM2KDAKmkY6Z-K0Qe5Gce7jMWWnCU1Osd3P4V8t8yQ/) and [TMBOM Use Cases and Requirements](https://docs.google.com/document/d/1gwfk0GHafuUE06GRc31kkQ-NTB2w8ZatYGKUm7Ah-5Q/)\r\n\r\n", + "summary": "Create a standardized file format for Threat Dragon version 3.x to replace the incompatible 1.x and 2.x JSON schemas. Evaluate the Open Threat Model file format and CycloneDX as potential solutions.\n\n1. Review the CycloneDX specification, particularly ECMA standard 424, which outlines the Threat Model Bill of Materials (TMBOM) structure.\n2. Analyze the TMBOM fields and their definitions in the CycloneDX Property Taxonomy.\n3. Compare the TMBOM requirements with the existing CycloneDX BOM examples, specifically the HBOM for reference.\n4. Participate in the informal workstream on OWASP CycloneDX Slack to gather insights and collaborate with others involved in this effort.\n5. Consult the provided agenda and minutes for ongoing discussions and document any relevant use cases and requirements for the TMBOM. \n\nBy standardizing the file format, improve interoperability with other tools and enhance usability for users of Threat Dragon.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/850", @@ -10551,9 +10901,9 @@ 43 ], "labels": [ - 71, + 89, 75, - 89 + 71 ] } }, @@ -10566,6 +10916,7 @@ "node_id": "I_kwDOEAWEP85-aL-p", "title": "Refactor Providers to use standard model", "body": "The existing providers have been maintained against the initial Github schema. A refactor to maintain a standard repository schema is necessary to simplify the code base.", + "summary": "Refactor the Providers to conform to the standard repository schema. Analyze the current implementation of the providers that are based on the initial GitHub schema. Identify discrepancies and areas for improvement to align with the new model.\n\n1. Review the standard repository schema and document the differences between it and the current provider implementations.\n2. Update data structures and methods in the providers to match the standard schema requirements.\n3. Ensure that all existing functionality remains intact by writing unit tests for the current behavior.\n4. Gradually replace the old schema references in the codebase with the new standard model.\n5. Conduct thorough testing to validate that all providers function correctly after the refactor.\n\nBy following these steps, enhance the maintainability and clarity of the codebase, which will streamline future development efforts.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/860", @@ -10583,8 +10934,8 @@ 141 ], "labels": [ - 74, - 88 + 88, + 74 ] } }, @@ -10597,6 +10948,7 @@ "node_id": "I_kwDOEAWEP85-mY1C", "title": "Feature Request: OPSEC Threat Modeling", "body": "**Describe what problem your feature request solves**:\r\nThere is no OPSEC based threat modeling unless its in context SDLC\r\n\r\n**Describe the solution you'd like**:\r\nI would like an option to use Threat-Dragon to create OPSEC or Operation Security based threat model\r\n\r\n**Additional context**:\r\nMost threat models are defined in terms of software developers life cycle or IT. OPSEC applies to IT and all domains especially physical. I could find no OPSEC threat models and think this could easily be added to Threat-Dragon and increase the user baser. 
The ability to easily create OPSEC threat models and display them especially for the physical domain has a lot of value. See an good example here: https://old.reddit.com/r/opsec/comments/eh6uvc/want_to_learn_opsec_as_a_total_beginner_start_here/\r\n\r\n\r\n", + "summary": "Implement OPSEC Threat Modeling feature in Threat-Dragon. \n\n1. Identify the current limitations in Threat-Dragon regarding OPSEC-based threat modeling. \n2. Research existing OPSEC frameworks and methodologies to incorporate relevant aspects into the tool.\n3. Develop a user interface option for creating OPSEC threat models, ensuring it accommodates both IT and physical domains.\n4. Create templates or guidelines for users to easily build OPSEC threat models.\n5. Validate the feature with potential users to gather feedback and improve functionality.\n\nThis enhancement will broaden Threat-Dragon’s applicability and attract a wider user base by addressing a gap in existing threat modeling tools.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/861", @@ -10612,8 +10964,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 75 + 75, + 71 ] } }, @@ -10626,6 +10978,7 @@ "node_id": "I_kwDOEAWEP85-5wp2", "title": "Create AppVeyor pipeline for signing Windows installer", "body": "**Describe what problem your feature request solves**:\r\nAs of July 2023 the Certificate Authority/Brower Forum’s [CA/B Forum](https://cabforum.org/baseline-requirements-code-signing/) requires all code signing private keys be stored on secure hardware.\r\nThe cost is prohibitive, $175 to $250 per year\r\n\r\n**Describe the solution you'd like**:\r\nWindows installer signed\r\n\r\n**Additional context**:\r\nThe [How to Sign a Windows App in Electron Builder](https://codesigningstore.com/how-to-sign-a-windows-app-in-electron-builder) describes what needs to be done to sign the Threat Dragon application.\r\nThe existing certificate runs out on 20th February 2024\r\n", + "summary": "Create an AppVeyor pipeline for signing the Windows installer for the Threat Dragon application. \n\n1. Store code signing private keys on secure hardware to comply with CA/B Forum requirements effective July 2023. \n2. Evaluate and select a cost-effective hardware security module (HSM) solution, considering expenses between $175 to $250 per year.\n3. Implement the signing process using the guidance from the \"How to Sign a Windows App in Electron Builder\" documentation.\n4. Configure the AppVeyor pipeline to automate the signing of the Windows installer.\n5. Ensure that the current certificate is renewed before it expires on February 20, 2024.\n\nFirst steps:\n- Research and choose an appropriate HSM provider.\n- Set up an AppVeyor account and create a new pipeline configuration.\n- Write and test the script for the signing process within the pipeline.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/872", @@ -10641,9 +10994,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, + 90, 87, - 90 + 71 ] } }, @@ -10656,6 +11009,7 @@ "node_id": "I_kwDOEAWEP85_m2Mp", "title": "Support Github/Bitbucket/Gitlab in Desktop", "body": "I am unclear why we would not support the various repositories in Desktop mode, so proposing enabling.\r\n\r\nWelcome feedback.", + "summary": "Enable support for GitHub, Bitbucket, and GitLab in Desktop mode. Assess the current architecture of the Desktop application to identify integration points for these repositories. 
\n\nFirst, gather information on the existing APIs for GitHub, Bitbucket, and GitLab. Review the authentication methods currently used in Desktop mode and determine if they can accommodate the additional services. \n\nNext, implement a plan to create a user interface for repository selection, ensuring that it allows users to authenticate and manage repositories seamlessly. Consider leveraging existing libraries or SDKs for each service to streamline the integration process. \n\nFinally, solicit feedback from users and developers to refine the functionality and address any potential issues that arise during the implementation phase.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/883", @@ -10671,9 +11025,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, + 91, 78, - 91 + 71 ] } }, @@ -10686,6 +11040,7 @@ "node_id": "I_kwDOEAWEP86BTduO", "title": "Provide translation for Demo Model Page", "body": "**Describe what problem your feature request solves**:\r\nCurrently , In the [Select Demo Page] User can't access the Demo models name in any language other than English as shown below\r\n\r\n**Describe the solution you'd like**:\r\nFor Better User Experience the Demo models should also have used i18n \r\n\r\n**Additional context**:\r\n\r\n![Screenshot 2024-03-05 192651](https://github.com/OWASP/threat-dragon/assets/107138786/7a465bb4-f2a0-4a26-bdd1-d637d28137b2)\r\n", + "summary": "Implement internationalization (i18n) for the Demo Model Page. \n\n1. Identify the current implementation of the Demo Model names that restricts access to English only.\n2. Analyze the existing codebase to locate where the Demo Model names are defined and rendered.\n3. Create a localization file that includes translations for the Demo Model names in multiple languages.\n4. Modify the rendering logic to pull the appropriate translation based on the user's selected language.\n5. Test the changes to ensure that the Demo Model names display correctly in different languages.\n6. Update the documentation to reflect the new i18n implementation.\n\nEnhance user experience by enabling multilingual support for Demo Model names.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/902", @@ -10701,8 +11056,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 73 + 73, + 71 ] } }, @@ -10711,10 +11066,11 @@ "pk": 373, "fields": { "nest_created_at": "2024-09-11T20:09:44.344Z", - "nest_updated_at": "2024-09-11T20:09:44.344Z", + "nest_updated_at": "2024-09-13T15:17:02.857Z", "node_id": "I_kwDOEAWEP86BdDcU", "title": "Conform to OCSF schema", "body": "**Describe what problem your feature request solves**:\r\nThe Open Cybersecurity Schema Framework ([OCSF](https://schema.ocsf.io/1.1.0/)) provides categories to organize event classes, which can be consumed by various operational monitoring tools.\r\n\r\n**Describe the solution you'd like**:\r\nProvide fields in the threat that conform to OCSF schema [Vulnerability Details object](https://schema.ocsf.io/1.1.0/objects/vulnerability) which is part of the [Vulnerability Finding Class](https://schema.ocsf.io/1.1.0/classes/vulnerability_finding)\r\n\r\n**Additional context**:\r\n\r\n", + "summary": "Conform to the OCSF schema by implementing fields in the threat data that adhere to the OCSF Vulnerability Details object. This enhancement will enable better organization and categorization of event classes, facilitating integration with various operational monitoring tools.\n\n1. 
Review the OCSF schema documentation, specifically the [Vulnerability Details object](https://schema.ocsf.io/1.1.0/objects/vulnerability) and the [Vulnerability Finding Class](https://schema.ocsf.io/1.1.0/classes/vulnerability_finding).\n2. Identify the necessary fields from the Vulnerability Details object that need to be implemented in the existing threat data structure.\n3. Modify the data model to include these fields, ensuring compatibility with the OCSF schema.\n4. Test the changes to ensure that the new fields are correctly populated and formatted according to the OCSF specifications.\n5. Update documentation to reflect the new schema conformance and provide examples of the updated threat data structure.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/904", @@ -10722,15 +11078,16 @@ "sequence_id": 2171877140, "is_locked": false, "lock_reason": "", - "comments_count": 0, + "comments_count": 1, "closed_at": null, "created_at": "2024-03-06T16:08:53Z", - "updated_at": "2024-03-06T16:08:54Z", + "updated_at": "2024-09-13T07:01:18Z", "author": 43, "repository": 502, "assignees": [], "labels": [ - 71 + 71, + 1115 ] } }, @@ -10743,6 +11100,7 @@ "node_id": "I_kwDOEAWEP86Bdofc", "title": "Running threat dragon behind nginx", "body": "**Describe the bug**:\r\nWhen deploying Threat Dragon to Kubernetes, I can't get the Github Oauth to work. I guess it could be a nginx config needed?\r\n\r\n**Expected behaviour**:\r\nLogin with Github Oauth should work behind a proxy.\r\n\r\n**Logs**:\r\n\r\n```\r\nerror: controllers/auth.js: {\"service\":\"threat-dragon\",\"timestamp\":\"2024-03-06 17:05:09\"}\r\nerror: Unexpected token ' in JSON at position 0 {\"service\":\"threat-dragon\",\"stack\":\"SyntaxError: Unexpected token ' in JSON at position 0\\n at JSON.parse ()\\n at getPrimaryKey (/app/td.server/dist/helpers/encryption.helper.js:23:19)\\n at Object.encryptPromise (/app/td.server/dist/helpers/encryption.helper.js:99:13)\\n at _callee$ (/app/td.server/dist/helpers/jwt.helper.js:21:47)\\n at tryCatch (/app/td.server/node_modules/@babel/runtime/helpers/regeneratorRuntime.js:45:16)\\n at Generator. (/app/td.server/node_modules/@babel/runtime/helpers/regeneratorRuntime.js:133:17)\\n at Generator.next (/app/td.server/node_modules/@babel/runtime/helpers/regeneratorRuntime.js:74:21)\\n at asyncGeneratorStep (/app/td.server/node_modules/@babel/runtime/helpers/asyncToGenerator.js:3:24)\\n at _next (/app/td.server/node_modules/@babel/runtime/helpers/asyncToGenerator.js:22:9)\\n at /app/td.server/node_modules/@babel/runtime/helpers/asyncToGenerator.js:27:7\\n at new Promise ()\\n at Object. (/app/td.server/node_modules/@babel/runtime/helpers/asyncToGenerator.js:19:12)\\n at Object.createAsync (/app/td.server/dist/helpers/jwt.helper.js:49:17)\\n at _callee$ (/app/td.server/dist/controllers/auth.js:55:42)\\n at tryCatch (/app/td.server/node_modules/@babel/runtime/helpers/regeneratorRuntime.js:45:16)\\n at Generator. (/app/td.server/node_modules/@babel/runtime/helpers/regeneratorRuntime.js:133:17)\",\"timestamp\":\"2024-03-06 17:05:09\"}\r\nerror: undefined {\"service\":\"threat-dragon\",\"timestamp\":\"2024-03-06 17:05:09\"}\r\n```\r\n", + "summary": "Investigate GitHub OAuth issue in Threat Dragon behind Nginx. \n\n1. Review Nginx configuration to ensure proper handling of OAuth callbacks. Confirm that the proxy settings are correctly forwarding requests, especially the Authorization header and any required cookies.\n\n2. 
Check the application's environment variables related to GitHub OAuth (e.g., `GITHUB_CLIENT_ID`, `GITHUB_CLIENT_SECRET`, `GITHUB_CALLBACK_URL`). Ensure they are correctly set and match the settings in the GitHub OAuth application.\n\n3. Analyze error logs for JSON parsing issues. Specifically, look at the error `Unexpected token ' in JSON at position 0`. This suggests malformed JSON is being processed. Trace back to where the JSON is generated or received to identify potential issues.\n\n4. Test the OAuth flow directly without Nginx to determine if the problem lies with the application or the proxy configuration.\n\n5. If issues persist, enable detailed logging in both Threat Dragon and Nginx to capture more information during the OAuth flow.\n\n6. Consult Threat Dragon and Nginx documentation for additional configuration settings that may be necessary for OAuth to function correctly behind a proxy. \n\nStart with validating Nginx settings and examining the application's environment variables for correctness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/905", @@ -10758,8 +11116,8 @@ "repository": 502, "assignees": [], "labels": [ - 73, - 88 + 88, + 73 ] } }, @@ -10772,6 +11130,7 @@ "node_id": "I_kwDOEAWEP86BspRR", "title": "GITHUB_ENTERPRISE_PORT incorrect placement due to bug in octonode", "body": "**Describe the bug**:\r\nThreat Dragon is improperly constructing GitHub Enterprise (GHE) URLs (after initial OAuth login), leading to `404` errors on GHE, with Threat Dragon then returning `500` errors.\r\n\r\nIt appears to start here:\r\n- https://github.com/OWASP/threat-dragon/blob/622338453c5dca4d01aa78646d1a9f382f3a0e8b/td.server/src/repositories/githubrepo.js#L9-L10\r\n\r\nThe inclusion of both the base path (`/api/v3`) and the `port` when constructing the URL for octonode causes the URL to be invalid.\r\n\r\nUsing `github.company.com` as the example...\r\n\r\n- this is what `enterpriseOpts` is set to, then:\r\n\r\n`enterpriseOpts { hostname: 'github.company.com/api/v3', port: 443, protocol: 'https' }`\r\n\r\n- So, then in [Octonode](https://github.com/pksunkara/octonode/blob/v0.10.2/lib/octonode/client.js#L221-L229):\r\n\r\n```javascript\r\n _url = url.format({\r\n protocol: urlFromPath.protocol || this.options && this.options.protocol || \"https:\",\r\n auth: urlFromPath.auth || (this.token && this.token.username && this.token.password ? 
this.token.username + \":\" + this.token.password : ''),\r\n hostname: urlFromPath.hostname || this.options && this.options.hostname || \"api.github.com\",\r\n port: urlFromPath.port || this.options && this.options.port,\r\n pathname: urlFromPath.pathname,\r\n query: query\r\n });\r\n```\r\n\r\n- and `this.options.hostname` is `http://github.company.com/api/v3`, so `_url` ends up being\r\n`https://github.company.com/api/v3:443/user`\r\n\r\n**Expected behaviour**:\r\nThreat Dragon should properly construct the GHE URL.\r\n\r\n**Environment**:\r\n\r\n- Version: v2.2.0\r\n- Platform: Web App\r\n- OS: Linux\r\n- Browser: N/A\r\n\r\n**To Reproduce**:\r\n- Enable use of GitHub Enterprise by setting `GITHUB_ENTERPRISE_HOSTNAME` in Threat Dragon's env\r\n- Login with GitHub in Threat Dragon UI\r\n- OAuth flow completes, but fetching `/api/v3/user` returns an error\r\n- Observe `td.server` logs, displaying error similar to below:\r\n\r\n```\r\nerror: Not Found {\r\n \"body\": {\r\n \"documentation_url\": \"https://docs.github.com/enterprise-server@3.8/rest\",\r\n \"message\": \"Not Found\"\r\n },\r\n \"headers\": {\r\n \"access-control-allow-origin\": \"*\",\r\n \"access-control-expose-headers\": \"ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, X-GitHub-Request-Id, Deprecation, Sunset\",\r\n \"connection\": \"close\",\r\n \"content-length\": \"96\",\r\n \"content-security-policy\": \"default-src 'none'\",\r\n \"content-type\": \"application/json; charset=utf-8\",\r\n \"date\": \"Fri, 08 Mar 2024 12:18:19 GMT\",\r\n \"referrer-policy\": \"origin-when-cross-origin, strict-origin-when-cross-origin\",\r\n \"server\": \"nginx/1.14.2\",\r\n \"strict-transport-security\": \"max-age=31536000; includeSubdomains\",\r\n \"x-accepted-oauth-scopes\": \"repo\",\r\n \"x-content-type-options\": \"nosniff\",\r\n \"x-frame-options\": \"deny\",\r\n \"x-github-enterprise-version\": \"3.8.11\",\r\n \"x-github-media-type\": \"github.v3; format=json\",\r\n \"x-github-request-id\": \"8527aaa6-62b5-460d-aebc-a0093eadff93\",\r\n \"x-oauth-client-id\": \"b72b631276646ddd4635\",\r\n \"x-oauth-scopes\": \"repo\",\r\n \"x-ratelimit-limit\": \"14000\",\r\n \"x-ratelimit-remaining\": \"13999\",\r\n \"x-ratelimit-reset\": \"1709903899\",\r\n \"x-ratelimit-resource\": \"core\",\r\n \"x-ratelimit-used\": \"1\",\r\n \"x-runtime-rack\": \"0.031289\",\r\n \"x-xss-protection\": \"0\"\r\n },\r\n \"service\": \"threat-dragon\",\r\n \"statusCode\": 404,\r\n \"timestamp\": \"2024-03-08 07:18:19\"\r\n}\r\n```\r\n\r\n- Checking the GHE side, we see that the URL is actually:\r\n`https://github.company.com/api/v3:443/user`", + "summary": "Fix the incorrect construction of GitHub Enterprise (GHE) URLs in Threat Dragon due to a bug in octonode. The issue arises from combining the base path (`/api/v3`) with the hostname, resulting in malformed URLs such as `https://github.company.com/api/v3:443/user`, leading to `404` errors on GHE and subsequent `500` errors in Threat Dragon.\n\n**Steps to tackle the problem:**\n\n1. **Identify the Source of the Bug:**\n - Review the code in `githubrepo.js`, specifically lines 9-10, where `enterpriseOpts` is constructed. Ensure the `hostname` does not include the base path.\n\n2. **Modify `enterpriseOpts`:**\n - Adjust the construction of `enterpriseOpts` to separate the `hostname` from the API version. 
For instance, set `hostname` to `github.company.com` and handle the API version separately.\n\n3. **Verify URL Construction in Octonode:**\n - Examine how octonode constructs the URL in `client.js` (lines 221-229). Ensure that the `hostname` and `port` are set correctly without the base path. \n\n4. **Test the Fix:**\n - Enable GitHub Enterprise in Threat Dragon and perform the OAuth login to verify that the constructed URL is now valid (e.g., `https://github.company.com/user` instead of using the incorrect format).\n\n5. **Update Documentation:**\n - Document the expected format of `GITHUB_ENTERPRISE_HOSTNAME` in Threat Dragon's environment settings to prevent future issues.\n\nBy implementing these steps, ensure Threat Dragon properly constructs GHE URLs and resolves the current errors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/907", @@ -10789,8 +11148,8 @@ 157 ], "labels": [ - 75, - 88 + 88, + 75 ] } }, @@ -10799,10 +11158,11 @@ "pk": 376, "fields": { "nest_created_at": "2024-09-11T20:09:49.356Z", - "nest_updated_at": "2024-09-11T20:09:49.356Z", + "nest_updated_at": "2024-09-13T03:53:47.416Z", "node_id": "I_kwDOEAWEP86E-6UZ", "title": "Optional entity properties are not shown in reports", "body": "**Describe what problem your feature request solves**:\r\n\r\nSome of the optional entity properties (such as `protocol`, `isEncrypted`, `isPublicNetwork`) can be edited in the diagram editing page but they do not appear in the report page.\r\n\r\n**Describe the solution you'd like**:\r\n\r\nEntity properties are listed with the entity, perhaps between the description and the list of threats.\r\n\r\n**Additional context**:\r\n\r\nHere's a screenshot of something I hacked together showing the properties of one of the data flow entities of the V2 demo model:\r\n\r\n![image](https://github.com/OWASP/threat-dragon/assets/5932424/72092476-beb6-48bb-84ab-2cd64677d17a)\r\n\r\nIf it looks promising, I can finish it off and open a pull request.", + "summary": "Implement a feature to display optional entity properties in reports. \n\nIdentify the missing optional properties, such as `protocol`, `isEncrypted`, and `isPublicNetwork`, that are editable on the diagram editing page but not shown in the report. Modify the report generation logic to include these properties, positioning them between the entity description and the list of threats.\n\nBegin by:\n\n1. Reviewing the current report generation code to locate where entity properties are fetched and displayed.\n2. Adding logic to retrieve and display the optional properties for each entity.\n3. Testing the changes to ensure that the new properties are correctly rendered in the report.\n4. Creating a pull request with the updates for further review and integration. 
\n\nConsider user interface implications and ensure that the report remains clear and informative with the added details.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/937", @@ -10813,17 +11173,15 @@ "comments_count": 2, "closed_at": null, "created_at": "2024-04-08T12:33:50Z", - "updated_at": "2024-08-23T09:29:52Z", + "updated_at": "2024-09-12T07:51:58Z", "author": 158, "repository": 502, - "assignees": [ - 158 - ], + "assignees": [], "labels": [ 71, - 80, 84, - 87 + 87, + 80 ] } }, @@ -10836,6 +11194,7 @@ "node_id": "I_kwDOEAWEP86GGHeM", "title": "server CSP", "body": "**Describe what problem your feature request solves**:\r\nThe server needs to implement a content security policy\r\n\r\n**Describe the solution you'd like**:\r\nuse the package [express-csp-header](https://www.npmjs.com/package/express-csp-header)\r\nor alternatively extend helmet: `td.server/src/config/securityheaders.config.js`\r\n\r\n**Additional context**:\r\nIt would be good to add NONCE\r\nSome incomplete work was done to add CSP in pull request #1040 \r\n", + "summary": "Implement a Content Security Policy (CSP) for the server. Utilize the `express-csp-header` package or enhance the existing security headers configuration in `td.server/src/config/securityheaders.config.js` with CSP features. \n\n1. Assess the current state of CSP implementation from pull request #1040.\n2. Choose between implementing `express-csp-header` or extending Helmet for CSP functionality.\n3. If using `express-csp-header`, install the package and configure it in the server setup, ensuring to define appropriate CSP directives.\n4. If extending Helmet, modify the security headers configuration file to include CSP directives and consider adding a NONCE for inline scripts.\n5. Test the implementation thoroughly to ensure that it meets the desired security standards and does not break existing functionalities.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/threat-dragon/issues/950", @@ -10851,8 +11210,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 80 + 80, + 71 ] } }, @@ -10865,6 +11224,7 @@ "node_id": "I_kwDOEAWEP86G4-d-", "title": "Implement mousewheel button dragging for graph panning", "body": "**Describe what problem your feature request solves**:\r\nThe current graph panning method requires a combination of keyboard and mouse inputs, and mousewheel button is redundant with left mouse button.\r\n\r\n**Describe the solution you'd like**:\r\nUsers should be able to press down the mousewheel button and move the mouse to pan around the graph.", + "summary": "Implement mousewheel button dragging for graph panning. \n\n**Problem to solve**: Eliminate the need for combining keyboard and mouse inputs for graph panning, as the current method is cumbersome and the mousewheel button is underutilized.\n\n**Proposed solution**: Enable users to click and hold the mousewheel button to drag and pan the graph. \n\n**Technical details**:\n1. Capture the mousewheel button press event.\n2. Track mouse movement while the button is pressed.\n3. Update the graph's view based on the relative mouse movement.\n4. Ensure smooth panning experience, possibly by implementing a throttling mechanism to limit the update frequency.\n\n**First steps**:\n1. Modify the event listener to include mousewheel button down and up events.\n2. Implement logic to track mouse position changes while the button is held down.\n3. 
Adjust the graph rendering logic to respond to these position changes accordingly.\n4. Test functionality across different browsers and devices to ensure compatibility.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/954", @@ -10880,9 +11240,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, + 83, 75, - 83 + 71 ] } }, @@ -10895,6 +11255,7 @@ "node_id": "I_kwDOEAWEP86Jqphs", "title": "Translate to Farsi", "body": "**Describe what problem your feature request solves**:\r\nIt would be good to have the translation to Farsi\r\n\r\n**Describe the solution you'd like**:\r\nTranslate into a new file using the pull request #925 'Vue I18n English Localization to Malay (MS)' as an example of what files need to be modified\r\n\r\n**Additional context**:\r\n\r\n", + "summary": "Translate the feature request into Farsi. \n\n**Problem to Solve**: Provide Farsi translation for better accessibility and user experience.\n\n**Proposed Solution**: Create a new file for Farsi translation. Use pull request #925, which details the translation of Vue I18n English Localization to Malay (MS), as a reference for identifying necessary file modifications.\n\n**Next Steps**:\n1. Review pull request #925 to understand the file structure and changes made.\n2. Identify the original language files that require translation to Farsi.\n3. Create a new Farsi localization file based on the identified files.\n4. Implement the translations, ensuring adherence to localization standards.\n5. Submit a pull request with your changes for review.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/962", @@ -10923,6 +11284,7 @@ "node_id": "I_kwDOEAWEP86K606F", "title": "Two undo/redo methods do not join up", "body": "**Describe the bug**:\r\nThere is a pull down menu item which can undo/redo text\r\nand there is also a undo/redo for the components on the diagram\r\n\r\n**Expected behaviour**:\r\nwhen redo/undo is selected from the pull down menu, if the drawing is in focus then the drawing package undo/redo buttons should be applied to the drawing components. If drawing component's attributes are changed then this should have redo/undo applied from the drawing package buttons\r\n\r\n**Environment**:\r\n\r\n- Version: 2.x\r\n- Platform: Desktop App\r\n- OS: any\r\n- Browser: electron\r\n\r\n**To Reproduce**:\r\n\r\n1. open a diagram in the desktop app\r\n2. observe undo/redo from within the drawing package for a component\r\n3. modify the name of the component, observe redo/undo from pull down\r\n4. use undo/redo from within the drawing package and observe node name changing, but not text box\r\n5. observe that pull down menu redo/undo does not apply to movement + changes to drawing components\r\n\r\n**Any additional context, screenshots, etc**:\r\n", + "summary": "Fix the disconnect between the two undo/redo methods in the application. The current implementation has separate undo/redo functionalities for text and diagram components, which leads to inconsistent behavior.\n\n1. Ensure that when the drawing area is in focus, the pull-down menu's undo/redo actions apply to the drawing components. \n2. 
When modifying component attributes (e.g., name changes), ensure that these actions are properly captured and can be undone/redone through both the drawing package buttons and the pull-down menu options.\n\n### Steps to Tackle the Problem:\n\n- **Review Existing Code**: Analyze the current implementations of both undo/redo functionalities to identify how they manage state changes and what triggers their respective actions.\n- **Design Unified Logic**: Create a cohesive logic that links the two undo/redo systems. Investigate how to trigger the drawing package's undo/redo methods when the pull-down menu is used and vice versa.\n- **Implement Event Listeners**: Add event listeners or hooks that detect when an action is performed in one system and trigger the corresponding action in the other.\n- **Test Scenarios**: Create test cases covering various scenarios, such as component modifications, movement actions, and interactions with both undo/redo systems to ensure consistent behavior.\n- **Documentation**: Update the documentation to reflect the new behavior and guide users on how to use the unified undo/redo functionality effectively. \n\nThis will enhance the user experience by ensuring that changes made in the drawing components are consistently managed across both methods.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/972", @@ -10941,8 +11303,8 @@ ], "labels": [ 80, - 87, - 88 + 88, + 87 ] } }, @@ -10955,6 +11317,7 @@ "node_id": "I_kwDOEAWEP86LNCuo", "title": "Allow user to specify a commit message to be used", "body": "**Describe what problem your feature request solves**:\r\n\"Created by OWASP Threat Dragon\" and \"Updated by OWASP Threat Dragon\" are fine for playing around, but are not enough for a real production use.\r\n\r\n**Describe the solution you'd like**:\r\nI would like to be able to write a commit message myself while saving changes in TD. The feature should be disabled by default but has 2 modi if activated via ENV: `enabled` (you can write your own message) and `forced` (you have to write your own message).\r\n", + "summary": "Implement a feature to allow users to specify custom commit messages when saving changes in OWASP Threat Dragon.\n\n1. Identify the current commit message generation logic in the codebase.\n2. Introduce an environment variable (ENV) to control the feature:\n - Set the default to disabled.\n - Implement two modes: \n - `enabled`: Allow users to input their own commit messages.\n - `forced`: Mandate that users provide a custom commit message.\n3. Create a user interface element (e.g., a text input field) for entering custom commit messages.\n4. Update the save functionality to include the user-defined commit message when committing changes.\n5. Ensure backward compatibility with the existing commit message system.\n6. Write unit tests to validate the new functionality and modes.\n7. Document the new feature in the README or relevant documentation files. 
\n\nStart by reviewing the existing saving and commit message code to understand how to integrate the new feature effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/973", @@ -10970,8 +11333,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 73 + 73, + 71 ] } }, @@ -10984,6 +11347,7 @@ "node_id": "I_kwDOEAWEP86Pi4OO", "title": "Add new PLOT4ai diagram components", "body": "**Describe what problem your feature request solves**:\r\nPLOT4ai has a need for other diagram components as well as the existing Process, Acotr and Store components\r\n\r\n**Describe the solution you'd like**:\r\nIf the diagram is of type 'PLOT4ai' then two new component types become available for the diagram, 'Model' and 'Result'\r\n\r\n**Additional context**:\r\n\r\nImage taken from a presentation at ThreatModCon 2024 Lisbon:\r\n\"Screenshot\r\n\r\n", + "summary": "Add new components 'Model' and 'Result' to PLOT4ai diagrams. \n\n1. Identify the existing diagram structure in the PLOT4ai codebase, focusing on how components like Process, Actor, and Store are defined and rendered.\n2. Implement the logic for new component types 'Model' and 'Result' to ensure they integrate seamlessly with the current diagram functionality.\n3. Update the diagram type check to enable these new components only when the diagram type is 'PLOT4ai'.\n4. Create necessary UI elements for adding and manipulating these new components within the diagram interface.\n5. Test the integration to confirm that the new components function correctly and do not introduce any regressions. \n\nRefer to the presentation from ThreatModCon 2024 for additional context and design inspiration.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1018", @@ -11001,9 +11365,9 @@ 150 ], "labels": [ - 71, 73, - 83 + 83, + 71 ] } }, @@ -11016,6 +11380,7 @@ "node_id": "I_kwDOEAWEP86P1TUl", "title": "PDF generation from web app", "body": "**Describe what problem your feature request solves**:\r\nThe desktop app allows saving the report as PDF.\r\nFor the web app, I only found the option to print the report.\r\n\r\nIt would be nice to have a \"download PDF\" option in the web app.\r\n\r\n**Describe the solution you'd like**:\r\nNext to the \"print\" button could be a \"download\" button that auto generates and downloads a PDF file.\r\n", + "summary": "Implement a \"download PDF\" feature in the web app to enhance user experience. This feature should allow users to generate and download reports as PDF files, similar to the functionality available in the desktop app.\n\n1. Investigate existing print functionality to understand the current implementation.\n2. Choose a PDF generation library compatible with the web app's technology stack (e.g., jsPDF, html2pdf.js).\n3. Add a \"download\" button next to the \"print\" button in the report interface.\n4. Write a function that captures the report content and converts it to a PDF format using the selected library.\n5. Trigger the PDF generation when the \"download\" button is clicked and initiate the download process.\n6. 
Test the PDF output to ensure it matches the report format and retains all necessary data.\n\nBy following these steps, you can successfully implement the requested feature for the web application.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1022", @@ -11044,6 +11409,7 @@ "node_id": "I_kwDOEAWEP86QE7bD", "title": "apply for Best Practices badge", "body": "**Describe what problem your feature request solves**:\r\nthe OpenSSF run a [Best Practices Badge Program](https://www.bestpractices.dev/en), it is very likely that Threat Dragon follows these best practices\r\n\r\n**Describe the solution you'd like**:\r\nInspect the criteria for [gaining the badge](https://www.bestpractices.dev/en/criteria/0) and apply\r\n\r\n**Additional context**:\r\nConsider the OWASP Project Committee [Good Practice](https://owasp.org/www-committee-project/#div-practice)\r\nConsider linking directly to OWASP from the initial home page\r\nConsider applying for [OWASP Production status](https://owasp.org/www-committee-project/#div-promotions)\r\n", + "summary": "Apply for the Best Practices badge for Threat Dragon by following these steps:\n\n1. **Inspect Badge Criteria**: Review the criteria outlined for the Best Practices Badge on the official [Best Practices website](https://www.bestpractices.dev/en/criteria/0). Ensure that Threat Dragon adheres to these standards.\n\n2. **Perform Assessment**: Conduct a thorough assessment of Threat Dragon against the criteria. Document areas of compliance and identify any gaps that need addressing.\n\n3. **Implement Improvements**: Address any identified gaps to align Threat Dragon with the best practices. Focus on coding standards, security measures, documentation, and community engagement as specified in the criteria.\n\n4. **Link OWASP Resources**: Consider incorporating direct links to OWASP resources from Threat Dragon’s homepage to enhance visibility and adherence to security best practices.\n\n5. **Apply for OWASP Production Status**: Explore the requirements for achieving OWASP Production status. Prepare necessary documentation and improvements to meet these requirements.\n\n6. **Submit Application**: Once all criteria are met, formally submit an application for the Best Practices badge through the official process outlined on the Best Practices website.\n\nBy following these steps, enhance Threat Dragon’s credibility and ensure it meets industry standards for security and best practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1027", @@ -11073,6 +11439,7 @@ "node_id": "I_kwDOEAWEP86RGrZY", "title": "Canvas reverts to small view area on save", "body": "**Describe the bug**:\r\nWhen a diagram is saved, Antv/X6 drawing package reverts the canvas size to the size of the container view \r\nThe user is left with a confusing diagram and has to drag components to get them back into view\r\nPossible that updating to antv/x6 version 2.x will solve this, see #722 \r\n\r\n**Expected behaviour**:\r\nwhen opening a diagram, the canvas size and viewing area should match the previous sizes\r\n\r\n**Environment**:\r\n\r\n- Version: after commit 6b8f3e3\r\n- Platform: both Web App and Desktop App\r\n- OS: all of MacOS / Windows / Linux\r\n- Browser: all of chrome, firefox, safari\r\n\r\n**To Reproduce**:\r\n\r\n1. Open the example diagram\r\n2. drag a component so that the diagram canvas resizes\r\n3. save the diagram\r\n4. 
only a portion of the canvas is now displayed\r\n\r\n**Any additional context, screenshots, etc**:\r\n\r\nThe canvas size needs to be saved and restored when redrawn. Here is debug:\r\n\r\n```\r\ntd.vue/src/service/x6/graph/events.js\r\n@@ -8,6 +8,10 @@ import { CELL_SELECTED, CELL_UNSELECTED } from '@/store/actions/cell.js';\r\n import { THREATMODEL_MODIFIED } from '@/store/actions/threatmodel.js';\r\n import shapes from '@/service/x6/shapes/index.js';\r\n \r\n+const canvasResized = ({ width, height }) => {\r\n+ console.debug('canvas resized to width ', width, ' height ', height);\r\n+};\r\n+\r\n const edgeConnected = ({ isNew, edge }) => {\r\n\r\n@@ -142,6 +146,7 @@ const nodeAddFlow = (graph) => ({ node }) => {\r\n };\r\n \r\n const listen = (graph) => {\r\n+ graph.on('resize', canvasResized);\r\n graph.on('edge:connected', edgeConnected);\r\n\r\n@@ -157,6 +162,7 @@ const listen = (graph) => {\r\n };\r\n \r\n const removeListeners = (graph) => {\r\n+ graph.off('resize', canvasResized);\r\n graph.off('edge:connected', edgeConnected);\r\n```\r\n", + "summary": "Reproduce the bug where the canvas reverts to a small view area upon saving a diagram using the Antv/X6 drawing package. \n\n**Steps to tackle the problem**:\n\n1. **Analyze the current behavior**:\n - Confirm that when a diagram is saved, the canvas size reverts to the container's size instead of retaining the user's modifications.\n\n2. **Check the version**:\n - Consider updating Antv/X6 to version 2.x as indicated in issue #722. Ensure that this version addresses the canvas resizing issue.\n\n3. **Modify the event listeners**:\n - Investigate the event handling in `td.vue/src/service/x6/graph/events.js`.\n - Implement logic to save the canvas size before saving the diagram. This might involve adding a new listener that captures the canvas dimensions whenever it is resized.\n\n4. **Implement canvas size restoration**:\n - After saving the diagram, ensure that the previously saved canvas size is restored when the diagram is reloaded. This may involve adjusting the state management or storage mechanism for the diagram's properties.\n\n5. **Test across environments**:\n - Verify the solution on all specified platforms (MacOS, Windows, Linux) and browsers (Chrome, Firefox, Safari) to ensure consistent behavior.\n\n6. 
**Document the changes**:\n - Update the codebase and document the changes made to handle canvas resizing and restoration for future reference and maintenance.\n\nFollowing these steps should lead to a resolution of the canvas resizing issue after saving diagrams.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1041", @@ -11090,11 +11457,11 @@ 43 ], "labels": [ - 88, - 92, - 87, 80, - 83 + 83, + 87, + 88, + 92 ] } }, @@ -11107,6 +11474,7 @@ "node_id": "I_kwDOEAWEP86T0tja", "title": "PDF Report dialog does not show existing PDF files", "body": "**Describe the bug**:\r\n\r\nExisting PDF files are not shown in file save dialog (probably wrong file type filter, i.e., two dots before the PDF extension).\r\n\r\n**Expected behaviour**:\r\n\r\nExisting PDF files are shown in file save dialog.\r\n\r\n**Environment**:\r\nVersion 2.2.0\r\nDesktop App\r\nWindows 11\r\n\r\n**To Reproduce**:\r\n\r\nExport PDF file into directory which contains PDF files.\r\n\r\n**Any additional context, screenshots, etc**:\r\n\r\n![image](https://github.com/user-attachments/assets/3cf7c72f-801f-44dd-bd06-f939ea5ca751)\r\n![image](https://github.com/user-attachments/assets/63364e84-5ce7-4353-a0da-6203759447dc)\r\n", + "summary": "Fix the PDF report dialog to display existing PDF files. The issue stems from a likely misconfiguration in the file type filter, which may be using an incorrect format (e.g., two dots before the PDF extension).\n\n**Steps to address the issue:**\n\n1. Investigate the file type filter settings in the PDF export dialog.\n2. Correct the file extension filter for PDF files to ensure it properly recognizes existing files (e.g., change from `..pdf` to `.pdf`).\n3. Test the file dialog after making changes to confirm that existing PDF files are displayed correctly.\n4. Review the code for other file type filters to ensure consistency and prevent similar issues.\n\n**Check environment:**\n- Confirm the bug exists in version 2.2.0 on Windows 11. \n\n**Document findings and update the issue tracker with progress.**", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1062", @@ -11123,8 +11491,8 @@ "assignees": [], "labels": [ 80, - 84, - 88 + 88, + 84 ] } }, @@ -11137,6 +11505,7 @@ "node_id": "I_kwDOEAWEP86T0wOO", "title": "\"Reason for out of scope\" missing in PDF report", "body": "**Describe the bug**:\r\n\r\nPDF report does not contain \"Reason for out of scope\" for \"Out of Scope\" elements.\r\n\r\n**Expected behaviour**:\r\n\r\nPDF report contains \"Reason for out of scope\" which is important for documentation purposes.\r\n\r\n**Environment**:\r\n\r\n- Version: 2.2\r\n- Platform: Desktop App\r\n- OS: Windows 11\r\n\r\n**To Reproduce**:\r\n\r\nHave element in DFD with \"Out of Scope\" checkbox checked and \"Reason for out of scope\" filled.\r\nDo PDF export.\r\n\r\n**Any additional context, screenshots, etc**:\r\n\r\n![image](https://github.com/user-attachments/assets/48754a80-5dc4-422e-b5fd-d7d4c1b13fe8)\r\n", + "summary": "Add \"Reason for out of scope\" to PDF report for \"Out of Scope\" elements. \n\n1. Identify the section in the PDF generation code where report data is compiled.\n2. Check if the \"Reason for out of scope\" field is being retrieved from the data model.\n3. Ensure this field is included in the data being passed to the PDF generation function.\n4. Update the PDF template to accommodate and display the \"Reason for out of scope\" appropriately.\n5. 
Test the PDF export functionality after implementing changes to verify that the reason appears as expected.\n\nReview the PDF generation library to confirm it supports the necessary formatting for the new content. Ensure compatibility with version 2.2 on Windows 11 desktop app.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1063", @@ -11153,8 +11522,8 @@ "assignees": [], "labels": [ 80, - 84, - 88 + 88, + 84 ] } }, @@ -11167,6 +11536,7 @@ "node_id": "I_kwDOEAWEP86T1sB7", "title": "Allow resizing of canvas area", "body": "**Describe what problem your feature request solves**:\r\n\r\nI cannot increase the canvas size. I can only increase the app's window size.\r\n\r\n**Environment**\r\nVersion 2.2\r\nWindows 11\r\nDesktop App\r\n4K Monitor\r\n\r\n**Describe the solution you'd like**:\r\n\r\nI want to be able to increase the canvas size to have a larger area for modeling.\r\n\r\n**Additional context**:\r\n\r\n![2024-08-22_11h38_57](https://github.com/user-attachments/assets/2b5c83a4-47c1-46b6-b046-debbd2106330)\r\n", + "summary": "Implement a feature to allow resizing of the canvas area within the application. \n\n**Problem to Solve**: The current limitation prevents users from increasing the canvas size, restricting modeling space despite the ability to resize the app's window. This is particularly problematic for users working on 4K monitors who require more modeling area for detailed work.\n\n**Proposed Solution**: Enable users to adjust the canvas size independently from the application window size. This could involve adding a resize handle or a settings option to specify canvas dimensions.\n\n**Technical Steps**:\n1. Investigate the existing canvas rendering logic to identify where size constraints are applied.\n2. Modify the canvas initialization parameters to accept dynamic width and height inputs.\n3. Implement event listeners for mouse or touch events to allow users to drag to resize the canvas.\n4. Ensure the canvas can maintain aspect ratio or allow for freeform resizing based on user preferences.\n5. Update the application's UI to reflect the new canvas size and ensure compatibility with existing features.\n6. Test the resizing feature on various resolutions and screen sizes, particularly focusing on 4K displays.\n\nStart by reviewing the canvas rendering code and identifying the current limitations in size settings.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1064", @@ -11182,8 +11552,8 @@ "repository": 502, "assignees": [], "labels": [ - 71, - 73 + 73, + 71 ] } }, @@ -11196,6 +11566,7 @@ "node_id": "I_kwDOEAWEP86T1vq7", "title": "Improve Drag and Drop of Components", "body": "**Describe what problem your feature request solves**:\r\n\r\nI have to select the _text_ of a component in the components view in order to drag and drop it into the canvas.\r\nIt is not possible to drag and drop by clicking other parts of the component.\r\n\r\n**Environment**\r\nVersion 2.2\r\nWindows 11\r\nDesktop App\r\n4K Monitor\r\n\r\n**Describe the solution you'd like**:\r\n\r\nI want to select any part of the component for drag and drop.\r\n\r\n**Additional context**:\r\n\r\n![2024-08-22_11h50_31](https://github.com/user-attachments/assets/7d5df0aa-dc18-49b1-8e28-52eb519d136a)\r\n\r\n", + "summary": "Improve the drag and drop functionality for components in the desktop app. Currently, users must click on the text of a component in the components view to initiate a drag and drop action. 
This limitation hinders usability, as users should be able to click anywhere on the component to perform the drag and drop.\n\n**Technical Details:**\n- Examine the current event listeners associated with the component's drag-and-drop functionality.\n- Identify the areas in the UI where click events are registered and modify them to include the entire area of the component, not just the text.\n- Ensure that the drag-and-drop behavior is consistent across different components and does not interfere with other UI interactions.\n\n**First Steps:**\n1. Review the existing code handling the drag-and-drop feature, focusing on event listeners for mouse interactions.\n2. Implement a test case that allows for dragging from different parts of the component.\n3. Conduct usability testing to ensure that the improved functionality meets user expectations.\n4. Document the changes made and update any relevant UI guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1065", @@ -11211,9 +11582,9 @@ "repository": 502, "assignees": [], "labels": [ - 71, 73, - 83 + 83, + 71 ] } }, @@ -11222,10 +11593,11 @@ "pk": 390, "fields": { "nest_created_at": "2024-09-11T20:10:18.569Z", - "nest_updated_at": "2024-09-11T20:10:18.569Z", + "nest_updated_at": "2024-09-13T16:57:38.012Z", "node_id": "I_kwDOEAWEP86T1yQ7", "title": "Add keyboard shortcut for save [CTRL + S]", "body": "**Describe what problem your feature request solves**:\r\n\r\nI have to use the UI button in order to save.\r\n\r\n**Describe the solution you'd like**:\r\n\r\nI would like to simply hit CTRL + S.\r\n\r\n**Additional context**:\r\n\r\n![image](https://github.com/user-attachments/assets/ce6a17e3-5169-4752-bcc0-3fb5e1105385)\r\n\r\n", + "summary": "Implement a keyboard shortcut for saving files using CTRL + S. \n\n**Problem**: Users are currently required to click a UI button to save, which is inefficient and disrupts workflow.\n\n**Solution**: Enable the CTRL + S keyboard shortcut to trigger the save functionality.\n\n**Technical Details**:\n1. Identify the area of the codebase where the save function is implemented.\n2. Create an event listener for keyboard events that detects when CTRL + S is pressed.\n3. Prevent the default action of the event to avoid browser behavior (like opening the save dialog).\n4. Call the existing save function when the shortcut is activated.\n\n**First Steps**:\n1. Review the existing save function implementation.\n2. Locate the appropriate module or component where keyboard event listeners can be added.\n3. Implement the event listener for CTRL + S.\n4. Test the functionality to ensure it works as intended across different browsers.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1066", @@ -11236,10 +11608,12 @@ "comments_count": 1, "closed_at": null, "created_at": "2024-08-22T09:56:14Z", - "updated_at": "2024-08-23T10:02:36Z", + "updated_at": "2024-09-13T14:12:50Z", "author": 162, "repository": 502, - "assignees": [], + "assignees": [ + 136 + ], "labels": [ 71, 80, @@ -11256,6 +11630,7 @@ "node_id": "I_kwDOEAWEP86T2dE0", "title": "Improve selection of edges and creation of bendpoints", "body": "**Describe what problem your feature request solves**:\r\n\r\nCurrently, it does not feel intuitive to select a data flow edge or a trust boundary. I have to select the text or the endpoint in order to access the properties. If i click anywhere else on the line, a bend point is immediately created. 
Often, I don't want to create a bend point but just select the data flow / trust boundary to edit properties.\r\n\r\n**Describe the solution you'd like**:\r\n\r\n I would prefer a UX similar to Miro. When clicking the edge, the properties are shown. When clicking on the circle between two bend points, a new bend point is created.\r\n\r\n**Additional context**:\r\n\r\n\r\n- In Threat Dragon, clicking on line immediately creates bend point.\r\n![2024-08-22_13h07_53](https://github.com/user-attachments/assets/05c64041-eea1-47cb-9b3b-ef792c1f76f6)\r\n\r\n- In Miro, clicking on line selects the line first and enables to edit properties. Bend points can then be created afterwards.\r\n![2024-08-22_13h08_30](https://github.com/user-attachments/assets/3e2def12-75c8-4039-8bfc-20f7eff830cc)\r\n\r\n", + "summary": "Enhance edge selection and bend point creation in Threat Dragon.\n\n**Identify the problem**: \nModify the current selection mechanism for data flow edges and trust boundaries to improve user experience. Currently, users must click on text or endpoints to access properties, while clicking anywhere else on the line creates an unintended bend point.\n\n**Proposed solution**: \nImplement a selection system similar to Miro. Enable property editing by clicking on the edge itself, while allowing bend points to be added only when clicking on designated areas between bend points.\n\n**Technical details**:\n1. **Event Handling**: Adjust the mouse click event listeners for edge elements to differentiate between selecting the edge and creating bend points.\n2. **Hit Testing**: Implement hit testing to identify if the click is on the edge or a specific area between bend points. Use bounding box calculations to determine click areas.\n3. **UI Feedback**: Provide visual feedback when the edge is selected (e.g., highlight or change cursor) to indicate that the properties can be edited.\n4. **Property Panel**: Ensure that the properties panel updates with the correct edge information when an edge is selected.\n\n**First steps**:\n1. Review the current event handling code for edges and bend points.\n2. Define the clickable areas for edges and bend points in the SVG or Canvas rendering.\n3. Implement hit testing logic to distinguish between a selection and a bend point creation.\n4. Test the new interaction model for usability and functionality across different scenarios.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1067", @@ -11271,10 +11646,10 @@ "repository": 502, "assignees": [], "labels": [ - 71, 73, 83, - 87 + 87, + 71 ] } }, @@ -11287,6 +11662,7 @@ "node_id": "I_kwDOEAWEP86T2fPJ", "title": "I have to click undo five times to actually undo the action", "body": "**Describe the bug**:\r\n\r\nWhen creating a data flow by double clicking a process, I have to execute undo five times in order to actually undo the action.\r\n\r\n**Expected behaviour**:\r\n\r\nOnly clicking once should be enough.\r\n\r\n**Environment**:\r\n\r\n- Version: 2.2\r\n- Platform: Desktop App\r\n- OS: Windows 11\r\n\r\n\r\n**To Reproduce**:\r\n\r\n\r\n1. Create Process Element.\r\n2. Double click on it.\r\n3. Click on undo five times.\r\n\r\n**Any additional context, screenshots, etc**:\r\n\r\n![2024-08-22_13h19_02](https://github.com/user-attachments/assets/6adaec86-9cd9-4487-95d1-091daf4c34ca)\r\n\r\n", + "summary": "Investigate the undo functionality in version 2.2 of the desktop app on Windows 11. 
\n\n**Bug Overview**: \nWhen a user creates a process element and double-clicks it, the undo action requires five clicks to revert the last action, whereas it should only require one click.\n\n**Steps to Tackle the Problem**:\n\n1. **Review Undo Logic**: Examine the code responsible for the undo stack management. Ensure it tracks actions correctly and that each action corresponds to a single undo step.\n \n2. **Debug the Undo Function**: Use debugging tools to trace the execution of the undo function after creating a process element. Check how many actions are being pushed onto the undo stack upon the double-click event.\n\n3. **Check Event Listeners**: Verify if multiple event listeners are attached to the double-click event, leading to multiple actions being recorded.\n\n4. **Test with Different Actions**: Create different process elements using variations (single-click, different elements) to confirm if the issue persists across all scenarios.\n\n5. **Update Documentation**: If a solution is found, update the documentation to reflect the correct usage of the undo function.\n\nImplement these steps to identify and resolve the undo action issue effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1068", @@ -11303,8 +11679,8 @@ "assignees": [], "labels": [ 80, - 83, - 88 + 88, + 83 ] } }, @@ -11313,10 +11689,11 @@ "pk": 393, "fields": { "nest_created_at": "2024-09-11T20:10:25.378Z", - "nest_updated_at": "2024-09-11T20:10:25.378Z", + "nest_updated_at": "2024-09-13T16:57:40.085Z", "node_id": "I_kwDOEAWEP86T2iJI", "title": "Undo does not bring back data flows", "body": "**Describe the bug**:\r\n\r\nHave two processes connected by data flow. When deleting the process, the data flows are deleted, too. When undoing the action, only the process is brought back. The data flows are gone.\r\n\r\n**Expected behaviour**:\r\n\r\nUndo should bring back all deleted elements.\r\n\r\n**Environment**:\r\n\r\n- Version: 2.2\r\n- Platform: Desktop App\r\n- OS: Windows 11\r\n\r\n**To Reproduce**:\r\n\r\n\r\n1. Create two processes\r\n2. Connect them with data flow\r\n3. Delete one process\r\n4. Click undo\r\n\r\n**Any additional context, screenshots, etc**:\r\n\r\n![2024-08-22_13h25_21](https://github.com/user-attachments/assets/4ba1b9ac-d396-45f6-9334-3425312e1f04)\r\n\r\n", + "summary": "**Fix the Undo Functionality for Data Flows** \n\n**Issue**: When deleting a process connected by data flow, the data flows are also removed. Upon using the undo command, only the process is restored, leaving the data flows missing.\n\n**Expected Behavior**: Ensure that the undo action restores all deleted elements, including processes and their associated data flows.\n\n**Environment Details**:\n- Version: 2.2\n- Platform: Desktop App\n- OS: Windows 11\n\n**Reproduction Steps**:\n1. Create two processes.\n2. Connect the processes with a data flow.\n3. Delete one of the processes.\n4. Click the undo button.\n\n**Action Items**:\n1. Investigate the current implementation of the undo functionality to identify why data flows are not restored.\n2. Modify the undo logic to track and restore both processes and their associated data flows.\n3. Implement unit tests to verify that the undo action behaves as expected in scenarios involving multiple connected elements.\n4. 
Test the functionality across different scenarios to ensure reliability.\n\n**Next Steps**:\n- Review the current data structure handling for processes and data flows.\n- Design a solution to link data flows to their respective processes to ensure they can be restored together.\n- Document the changes and monitor subsequent user feedback for further improvements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1069", @@ -11324,17 +11701,17 @@ "sequence_id": 2480546376, "is_locked": false, "lock_reason": "", - "comments_count": 3, + "comments_count": 4, "closed_at": null, "created_at": "2024-08-22T11:28:13Z", - "updated_at": "2024-08-23T11:47:50Z", + "updated_at": "2024-09-13T16:33:40Z", "author": 162, "repository": 502, "assignees": [], "labels": [ + 88, 73, - 83, - 88 + 83 ] } }, @@ -11347,6 +11724,7 @@ "node_id": "I_kwDOEAWEP86UHzLm", "title": "move threatdragon.com to owasp domain", "body": "**Describe what problem your feature request solves**:\r\n\r\nAt present we have the demo and docs hosted by heroku at threatdragon.com\r\nIt would be good that this is moved to threatdragon.owasp.org, in a similar way to mas.owasp.org for example\r\n\r\n**Describe the solution you'd like**:\r\nmove to threatdragon.owasp.org\r\n\r\n**Additional context**:\r\nthe index page for mas.owasp.org is here: https://github.com/OWASP/owasp-mastg/blob/master/docs/index.md?plain=1\r\n", + "summary": "Move threatdragon.com to the OWASP domain. \n\n**Problem to Solve**: Currently, the demo and documentation for Threat Dragon are hosted on threatdragon.com via Heroku. Migrating to the OWASP domain (threatdragon.owasp.org) will streamline hosting and enhance visibility within the OWASP community, similar to the setup for mas.owasp.org.\n\n**Proposed Solution**: Transition the hosting from threatdragon.com to threatdragon.owasp.org.\n\n**Technical Steps to Tackle the Problem**:\n1. Set up a subdomain for Threat Dragon on the OWASP domain (threatdragon.owasp.org).\n2. Transfer the existing content from threatdragon.com to the new subdomain.\n3. Ensure that all links and references in documentation are updated to point to the new domain.\n4. Configure DNS settings to direct traffic from threatdragon.com to threatdragon.owasp.org.\n5. Verify that all features function properly on the new hosting environment.\n6. Redirect traffic from threatdragon.com to the new domain to maintain access and SEO.\n\n**Additional Context**: Refer to the index page for mas.owasp.org for structure and layout guidance: https://github.com/OWASP/owasp-mastg/blob/master/docs/index.md?plain=1.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1073", @@ -11375,6 +11753,7 @@ "node_id": "I_kwDOEAWEP86VwNCQ", "title": "Migrate to Bootstrap v5.x", "body": "**Describe what problem your feature request solves**:\r\nThreat Dragon is stuck on Bootstrap version 4.6.1 because of breaking changes going to Bootstrap v5.x\r\nSee https://getbootstrap.com/docs/5.0/migration/\r\n\r\n**Describe the solution you'd like**:\r\nprovide Bootstrap 5.3.3 in file `td.vue/package.json`\r\n\r\n**Additional context**:\r\nMoving to Bootstrap 5.3.3 was tried in commit 37a1734906767b5db265064244222f3ce3c2e90f\r\n", + "summary": "Migrate Threat Dragon to Bootstrap v5.x to resolve issues stemming from reliance on Bootstrap 4.6.1 due to breaking changes. \n\n1. Update the `td.vue/package.json` file to include Bootstrap version 5.3.3.\n2. 
Review the migration guide at https://getbootstrap.com/docs/5.0/migration/ to identify breaking changes and necessary adjustments.\n3. Test the application thoroughly to ensure compatibility and fix any issues arising from the migration.\n\nBegin by creating a branch for the migration, updating the package version, and conducting an initial test to identify immediate breaking changes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1080", @@ -11390,8 +11769,8 @@ "repository": 502, "assignees": [], "labels": [ - 87, - 93 + 93, + 87 ] } }, @@ -11400,10 +11779,11 @@ "pk": 396, "fields": { "nest_created_at": "2024-09-11T20:10:30.404Z", - "nest_updated_at": "2024-09-12T01:08:50.312Z", + "nest_updated_at": "2024-09-13T03:53:49.468Z", "node_id": "I_kwDOEAWEP86WD248", "title": "Can't move ends of line objects inside of Trust Boundary frame", "body": "**Describe the bug**:\r\nWhen line objects (i.e., Data Flow, Trust Boundary lines) are inside of a Trust Boundary frame, manipulation of the line ends causes the manipulated end to be placed at a position other than where the cursor was on release of the left mouse button.\r\n\r\n**Expected behaviour**:\r\nLine ends are placed where the mouse cursor is at the time that the left mouse button is released.\r\n\r\n**Environment**:\r\n\r\n- Version: 2.2.0.0\r\n- Platform: Desktop App\r\n- OS: Windows\r\n\r\n**To Reproduce**:\r\n1) On a STRIDE diagram, drag a Data Flow line to the canvas.\r\n2) Drag a Trust Boundary frame to the canvas and resize it so that the Data Flow line is fully enclosed in the Trust Boundary.\r\n3) Click on one end of the Data Flow line and drag it slightly to another position *within* the Trust Boundary.\r\n4) Note that the Trust Boundary highlights yellow (as if something were being anchored to it) once the line end begins to be repositioned.\r\n5) Note that - upon release of the line end - the end will position somewhere else (it looks like the middle of the frame) within the Trust Boundary.\r\n5) Note that selecting only the Trust Boundary and deleting it will delete both objects.\r\n", + "summary": "**Fix the issue with line object manipulation inside Trust Boundary frames**.\n\n**Identify the bug**: When manipulating the ends of line objects within a Trust Boundary frame, the endpoint is not positioned correctly upon mouse release. Instead of aligning with the cursor, it appears to default to a central position within the frame.\n\n**Expected functionality**: Ensure that the endpoints of lines are accurately placed according to the mouse cursor's position at the moment of releasing the left mouse button.\n\n**Technical details**:\n\n1. Investigate the mouse event handling, particularly the logic that determines the final position of line endpoints upon mouse release.\n2. Check for any conditional logic that might be influencing the positioning when lines are within a Trust Boundary frame.\n3. Review any anchoring or snapping features that may interfere with line endpoint placement.\n\n**First Steps**:\n\n1. Set up a development environment using the specified version (2.2.0.0) on Windows.\n2. Reproduce the issue by following the steps outlined in the \"To Reproduce\" section of the bug report.\n3. Add debugging statements to log the coordinates of the mouse cursor during the drag action and the coordinates received upon mouse release.\n4. Analyze how the Trust Boundary frame’s properties affect the line endpoints during manipulation.\n5. 
Modify the event handling code to ensure that the endpoint's final position reflects the cursor's last known position instead of an arbitrary location.\n\n**Next Actions**: Implement the code changes and test extensively to confirm that the issue is resolved across various scenarios involving Trust Boundary frames and line objects.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-dragon/issues/1082", @@ -11419,10 +11799,10 @@ "repository": 502, "assignees": [], "labels": [ - 80, - 83, + 88, 87, - 88 + 80, + 83 ] } }, @@ -11431,10 +11811,11 @@ "pk": 397, "fields": { "nest_created_at": "2024-09-11T20:10:57.810Z", - "nest_updated_at": "2024-09-11T20:10:57.810Z", + "nest_updated_at": "2024-09-13T15:17:12.248Z", "node_id": "MDU6SXNzdWU3ODY1NjgzODA=", "title": "Participation request", "body": "I'm interested to take part in this amazing initiative. \r\nCould you please guide?\r\nThanks!", + "summary": "A user has expressed interest in participating in the initiative and is seeking guidance on how to get involved. It would be helpful to provide instructions or resources for participation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-kubernetes-security-testing-guide/issues/4", @@ -11465,6 +11846,7 @@ "node_id": "I_kwDOD7_kCs5MnZj6", "title": "Pipeline Tampering Risks & Prevention", "body": "Hi folks,\r\nAs a DevSecOps practitioner for many sizes of development, there is a critical one for maintaining DevSecOps Pipeline to prevent integrity violation and DRY principle with the pipeline consuming\r\n\r\nAbstraction Ideas:\r\n- Pipeline definition store as separate repos\r\n- Consuming pipeline as git sub-modules\r\n- Pipeline call should be visible and measured\r\n\r\nBenefits:\r\n\r\n- [ ] Pipeline enforcement\r\n- [ ] Pipeline integrity\r\n- [ ] Pipeline scalability\r\n\r\nI'm happy to help but not so sure which category should we put it on", + "summary": "The issue discusses the risks of pipeline tampering in DevSecOps and suggests methods for prevention, including storing pipeline definitions in separate repositories, using git sub-modules, and ensuring visibility and measurability of pipeline calls. The proposed changes aim to enhance pipeline enforcement, integrity, and scalability. It also requests guidance on the appropriate category for these suggestions. To move forward, it would be beneficial to categorize the issue and outline actionable steps for implementing the proposed ideas.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/47", @@ -11482,9 +11864,9 @@ 165 ], "labels": [ - 95, 96, - 97 + 97, + 95 ] } }, @@ -11497,6 +11879,7 @@ "node_id": "I_kwDOD7_kCs5MuBHN", "title": "Software Composition Analysis & Supply Chain Risk Management", "body": "Hey!\r\n\r\nI see that the SCA is a little bit less developed than other parts of the doc, so I'd be happy to expand on this to include various techniques, technologies, tools, and workflows on how this is done in a real-world scenario. Let me know if that's what you're interested in. I also gave a talk about it [here](https://mostafa.dev/practical-cybersecurity-supply-chain-risk-management-7a54ed7d9cc7).", + "summary": "There is a request to enhance the Software Composition Analysis (SCA) section of the documentation, as it currently lacks depth compared to other areas. The suggestion includes adding various techniques, technologies, tools, and workflows relevant to real-world scenarios. 
It's also noted that a relevant talk was given on the topic. Expanding this section would improve the overall quality and comprehensiveness of the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/48", @@ -11514,8 +11897,8 @@ 166 ], "labels": [ - 95, - 96 + 96, + 95 ] } }, @@ -11528,6 +11911,7 @@ "node_id": "I_kwDOD7_kCs5M0c-2", "title": "Access Controls in pipeline config", "body": "Hey All,\r\nI can provide some content about accessing to pipeline configs, user types and branch protection to secure pipeline config files.\r\nIs this topic sound valuable for you?\r\nThnks.", + "summary": "The issue discusses the need for content related to access controls in pipeline configurations, including user types and branch protection for securing these files. The author is seeking feedback on the value of this topic. It may be beneficial to respond with thoughts on the importance of this information and possibly outline specific areas to cover.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/50", @@ -11545,9 +11929,9 @@ 167 ], "labels": [ - 95, 96, - 97 + 97, + 95 ] } }, @@ -11560,6 +11944,7 @@ "node_id": "I_kwDOD7_kCs5M561T", "title": "Add open source solution ", "body": "Gitlab security pipelines\r\nCan be found: https://gitlab.com/whitespots-public/pipelines\r\n\r\nWith Security stage integrated into your team's pipelines, on, let's say, every release, Security stage is run and trigger Security pipelines to do the job. Security stage itself doesn't affect your time-to-market. \r\nSecurity pipelines combine different types of security scanners in one \"Security\" stage. \r\nScans reports are sent to DefectDojo, where triage begins. \r\n\r\nPipelines integration instructions: https://www.youtube.com/watch?v=DLN1kNh_Ha0 \r\nSSDLC based approach is described here: https://www.youtube.com/watch?v=6FGV4OcrIB8\r\nHow to work with DefectDojo tutorial: https://www.youtube.com/watch?v=_uFOIf1BUwU\r\n\r\nWe are contributing to this project for about 2 years and are ready to share. Hope it worth being added here. ", + "summary": "The issue proposes adding an open-source solution for integrating security pipelines within GitLab, specifically for use during release processes. The integration allows for running a security stage that triggers various security scanners, without impacting the time-to-market. The results of these scans are sent to DefectDojo for triaging. \n\nThe author has provided links to video instructions for pipeline integration, a secure software development lifecycle (SSDLC) approach, and a tutorial on working with DefectDojo. The contribution has been developed over two years, indicating a robust solution worth considering for inclusion. \n\nTo address this issue, it may be necessary to evaluate the proposed solution and assess its compatibility with existing systems before proceeding with the integration.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/52", @@ -11588,6 +11973,7 @@ "node_id": "I_kwDOD7_kCs5a2wd4", "title": "Add source of schemas to assests folder", "body": "Hi, can u add the source scheme file of [image](https://github.com/OWASP/DevSecOpsGuideline/blob/master/document/assets/images/Pipeline-view.png) to assests folder.\r\nThis will allow other users to change the images.", + "summary": "The issue requests the addition of the source scheme file for a specific image to the assets folder. 
This would enable other users to modify the images as needed. To resolve this, the relevant source file should be located and added to the assets folder.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/65", @@ -11603,8 +11989,8 @@ "repository": 513, "assignees": [], "labels": [ - 95, - 97 + 97, + 95 ] } }, @@ -11617,6 +12003,7 @@ "node_id": "I_kwDOD7_kCs5o53SY", "title": "Dependency Security Management & Continuous Dependency Remediation", "body": "In the modern AppSec program, it's necessary to \"shift-left\" security & governance for dependency from the `Code` to the `Plan` stage.\r\n\r\n# Conceptual approach\r\n\r\n## Plan phrase:\r\n\r\n### For OSS Dependency:\r\n- Benchmark OSS dependency project with [OpenSFF Scorecard](https://github.com/ossf/scorecard)\r\n\r\n### For vendor and third-party dependency:\r\n- Involve SBOM as artifacts release manifest in order to be aware of downstream dependencies. The benefits of the SBOM approach allow the security team to perform security assessments without the need for source code - might not available with third-party\r\n\r\nBuilding private dependencies registry to secure store and sign-off for dependency to prevent availability and tampering issues from upstream maintainers\r\n\r\n## Code phrase:\r\n- Setup proper dependency security scanning tool in CI/CD pipeline\r\n- Setup Dependency Vulnerability Assessment to continuously scan and alerts for new finding developers\r\n", + "summary": "The issue discusses the need for enhanced security management for dependencies within modern application security programs by shifting the focus from the coding phase to the planning stage. \n\nKey recommendations include utilizing the OpenSFF Scorecard to benchmark open-source dependencies and incorporating Software Bill of Materials (SBOM) for vendor and third-party dependencies to facilitate security assessments. Furthermore, the establishment of a private dependencies registry is suggested to secure and manage dependencies effectively, mitigating risks associated with availability and tampering.\n\nIn the coding phase, the implementation of dependency security scanning tools in the CI/CD pipeline and continuous vulnerability assessments to alert developers of new findings is emphasized. \n\nAction items may involve integrating the recommended tools and frameworks into the existing security processes to strengthen dependency management and remediation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/75", @@ -11643,6 +12030,7 @@ "node_id": "I_kwDOD7_kCs6K0HXa", "title": "Provenance", "body": "Add steps with provenance generating on Build and provenance check on Deploy", + "summary": "The issue requests the addition of steps for generating provenance during the build process and checking provenance during deployment. This implies that enhancements are needed in the build and deployment workflows to incorporate provenance tracking and verification. 
Implementing these steps will help ensure the integrity and traceability of the builds.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/86", @@ -11657,12 +12045,12 @@ "author": 169, "repository": 513, "assignees": [ - 166, - 169 + 169, + 166 ], "labels": [ - 95, - 96 + 96, + 95 ] } }, @@ -11675,6 +12063,7 @@ "node_id": "I_kwDOD7_kCs6M_uJB", "title": "Diagram as code", "body": "Would there be a significant interest in moving toward diagram as code for the various schema in this repository? \n\nI have used `mermaid` personally with significant success to either document workflow, git history and a few others common diagram.\n\nIt fosters diagram evolution given everything is \"as code\". \n\n\nNot certain if they would render properly on the web but it does in GitHub markdown. ", + "summary": "The issue discusses the potential interest in adopting \"diagram as code\" for the schemas in the repository, highlighting the success of using `mermaid` for documenting workflows and other diagrams. The author notes that this approach supports the evolution of diagrams since they are managed as code. There is uncertainty about whether these diagrams would render correctly on the web, although they do work in GitHub markdown. \n\nTo move forward, it may be beneficial to evaluate the feasibility of integrating `mermaid` or similar tools into the repository's documentation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/89", @@ -11697,10 +12086,11 @@ "pk": 406, "fields": { "nest_created_at": "2024-09-11T20:11:45.076Z", - "nest_updated_at": "2024-09-12T00:15:03.994Z", + "nest_updated_at": "2024-09-13T17:00:41.837Z", "node_id": "I_kwDOD7_kCs6Vsx-L", "title": "Translating DevSecOps Guideline to \"Farsi\"", "body": "A clear and technical translation with simplicity is needed for the valuable set of recommendations presented in this project for the Persian speaking community, and we intend to translate it.", + "summary": "The issue discusses the need for a clear and technically accurate translation of the DevSecOps Guideline into Farsi, aimed at making the recommendations accessible to the Persian-speaking community. The task involves ensuring that the translation is both straightforward and retains the original intent of the guidelines. It would be beneficial to initiate the translation process while considering these aspects.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DevSecOpsGuideline/issues/91", @@ -11727,6 +12117,7 @@ "node_id": "I_kwDOD7JcLc5XKQof", "title": "Feature Request -- FIM Stats on each run", "body": "It would be helpful to have a report on the same line that reports the time required for the cycle.\r\nAn example is below:\r\n\r\n Total time consumed in this round file integrity checking = 1006506ms (1006.506s) Number of files Scanned = 10003 Number of files in the DB = 10040\r\n\r\nLatest Round: TimeConsumed=1006506ms (1006.506s) FilesScanned=10003 CurrentFilesInDB=10045\r\n", + "summary": "Implement a feature to display performance statistics for each file integrity monitoring (FIM) run. Include a report that details the total cycle time, number of files scanned, and current files in the database. 
\n\nStructure the output as follows:\n- Total time consumed in this round of file integrity checking: `Total time consumed = {time}ms ({time in seconds}s)`\n- Include counts for:\n - Number of files scanned\n - Number of files in the database\n\nFor example:\n```\nTotal time consumed in this round of file integrity checking = 1006506ms (1006.506s)\nNumber of files scanned = 10003\nNumber of files in the DB = 10040\n\nLatest Round: TimeConsumed=1006506ms (1006.506s) FilesScanned=10003 CurrentFilesInDB=10045\n```\n\n**First Steps:**\n1. Identify the section of the code responsible for executing the FIM runs.\n2. Measure the execution time using a suitable timer method (e.g., `System.currentTimeMillis()` in Java).\n3. Gather statistics on the number of files scanned and those present in the database during each run.\n4. Format and output the collected data in the specified report format after each run.\n5. Test the implementation to ensure accuracy and performance impact.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-winfim.net/issues/8", @@ -11753,6 +12144,7 @@ "node_id": "I_kwDOD7JcLc5Y841u", "title": "Newline on the StopMessage String", "body": "There is a carriage return on the stop message string located in the Controller.cs:\r\n\r\n string stopMessage = \"Total time consumed in this round file integrity checking = \" +\r\n watch.ElapsedMilliseconds + \"ms (\" +\r\n Math.Round(Convert.ToDouble(watch.ElapsedMilliseconds) / 1000, 3)\r\n .ToString(CultureInfo.InvariantCulture) + \"s).\\n\" +\r\n LogHelper.GetRemoteConnections();\r\n\r\nThis newline creates havoc with Excel importing CSV. More specifically, the cell looks blank.\r\n", + "summary": "Remove the newline character from the stop message string in `Controller.cs`. The current implementation introduces a carriage return that disrupts the CSV format, causing Excel to misinterpret the data, resulting in blank cells.\n\n**Technical Steps to Address the Issue:**\n\n1. Locate the `stopMessage` string declaration in `Controller.cs`.\n2. Eliminate the newline character (`\\n`) at the end of the string.\n - Change:\n ```csharp\n string stopMessage = \"Total time consumed in this round file integrity checking = \" +\n watch.ElapsedMilliseconds + \"ms (\" +\n Math.Round(Convert.ToDouble(watch.ElapsedMilliseconds) / 1000, 3)\n .ToString(CultureInfo.InvariantCulture) + \"s).\\n\" +\n LogHelper.GetRemoteConnections();\n ```\n - To:\n ```csharp\n string stopMessage = \"Total time consumed in this round file integrity checking = \" +\n watch.ElapsedMilliseconds + \"ms (\" +\n Math.Round(Convert.ToDouble(watch.ElapsedMilliseconds) / 1000, 3)\n .ToString(CultureInfo.InvariantCulture) + \"s).\" +\n LogHelper.GetRemoteConnections();\n ```\n\n3. Test the output to ensure the string formats correctly without causing issues in Excel.\n4. Validate the CSV import functionality to confirm that the changes resolve the blank cell issue in Excel.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-winfim.net/issues/9", @@ -11779,6 +12171,7 @@ "node_id": "I_kwDOD7JcLc5Y8-P5", "title": "Improper Date Time Representation", "body": "Within Service.cs and Controller.cs there are multiple places where the log format is set to yyyy/dd/MM.\r\nIt Should be yyyy/MM/dd\r\n", + "summary": "Correct the date time representation in Service.cs and Controller.cs. Replace all instances of the log format set to `yyyy/dd/MM` with `yyyy/MM/dd`. \n\n1. Open Service.cs and Controller.cs files.\n2. 
Search for occurrences of `yyyy/dd/MM`.\n3. Update each occurrence to `yyyy/MM/dd`.\n4. Test the logging functionality to ensure the date format is displayed correctly after the changes. \n5. Commit the changes and create a pull request for review.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-winfim.net/issues/10", @@ -11805,6 +12198,7 @@ "node_id": "I_kwDOD7JcLc5aN67s", "title": "Windows Defender spikes CPU usage when WinFIM.NET service is running", "body": "Hello,\r\n\r\nWe are testing your tool on Windows Server 2019. Somehow, when the WinFIM.NET service is running, it spikes up Windows Defender CPU usage. Even when we do not have any files in the monitoring folder (C:/Temp currently). We tried to set couple exclusions in Windows Defender, but no luck. Also, the tool is set to run with parameter 0 in scheduler file.\r\n\r\nAny idea how to set this properly? Or how to set the exclusions properly?\r\n\r\nThanks.", + "summary": "Investigate high CPU usage by Windows Defender when WinFIM.NET service is active on Windows Server 2019. \n\n1. Confirm that the WinFIM.NET service is running with parameter 0 as intended.\n2. Attempt to add specific exclusions for the WinFIM.NET service in Windows Defender:\n - Open Windows Security settings.\n - Navigate to \"Virus & threat protection.\"\n - Select \"Manage settings\" under \"Virus & threat protection settings.\"\n - Scroll down to \"Exclusions\" and add C:/Temp and any relevant executables related to WinFIM.NET.\n3. Monitor CPU usage after applying exclusions to determine if the issue persists.\n4. Check for any other processes that may interact with WinFIM.NET that could also be causing CPU spikes.\n5. Review the Windows Event Viewer for any logs that might indicate conflicts or issues with the WinFIM.NET service or Windows Defender. \n\nDocument any changes made and their effects on CPU usage for further troubleshooting if necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-winfim.net/issues/12", @@ -11831,6 +12225,7 @@ "node_id": "I_kwDOD7JcLc5u6IBN", "title": "No owner info for who delete or modify files", "body": "Hi,\r\n\r\nIf somebody deletes or modify the files, from the FIM logs, we don't know who did it. How to add feature to track who delete or modify files? ", + "summary": "Implement a feature to track file deletions and modifications by logging user information in FIM (File Integrity Monitoring) logs. \n\n1. Analyze the current FIM logging system to identify how file changes are recorded.\n2. Modify the logging mechanism to capture and store the user ID or username associated with file modifications and deletions.\n3. Ensure that the system can retrieve user information from the operating environment (e.g., leveraging audit logs or filesystem events).\n4. Update documentation to reflect new logging capabilities and instructions for monitoring file changes.\n5. Test the implementation in a controlled environment to verify that user information is correctly logged and retrievable. \n6. 
Deploy the changes to the production environment and monitor for any issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-winfim.net/issues/15", @@ -11853,10 +12248,11 @@ "pk": 412, "fields": { "nest_created_at": "2024-09-11T20:12:16.371Z", - "nest_updated_at": "2024-09-11T20:12:16.371Z", + "nest_updated_at": "2024-09-13T15:17:59.316Z", "node_id": "I_kwDOD7JcLc6PiR9W", "title": "No support for UTF-8 characters in path", "body": "WinFIM throws 7773 errors whenever a word in the path to a file contains a non en_US character.\r\nThe problematic letters are replaced in the path which logically makes it wrong as the log detail shows:\r\n`[...] could be renamed / deleted during the hash calculation. This file is ignored in this checking cycle - Can't find a part of the path`\r\n\r\nIt would be nice to add UTF-8 support or at least give a procedure on how to enable it.", + "summary": "Enhance WinFIM to support UTF-8 characters in file paths. Currently, the application throws errors (7773) when encountering non-English characters in paths, leading to incorrect file handling during hash calculations. The logs indicate that files with non-en_US characters are ignored, causing issues like: `[...] could be renamed / deleted during the hash calculation. This file is ignored in this checking cycle - Can't find a part of the path`.\n\nTo tackle this problem:\n\n1. **Review Current Encoding Methods**: Investigate how WinFIM currently handles file paths and identify the areas where character encoding is limited to en_US.\n\n2. **Implement UTF-8 Encoding**: Modify the file path handling code to support UTF-8 encoding. Ensure that all I/O operations (reading, writing, hashing) correctly interpret and process UTF-8 characters.\n\n3. **Test with Various Encodings**: Create test cases that include file paths with a range of UTF-8 characters. Verify that the application can handle these paths without errors.\n\n4. **Update Documentation**: If a straightforward method to enable UTF-8 support is not feasible, document the steps required to configure the application for UTF-8 processing.\n\n5. **Seek Community Feedback**: Engage with users to gather insights on their experiences and any additional requirements concerning file path handling.\n\nBy following these steps, ensure that WinFIM can robustly handle file paths containing UTF-8 characters, improving its usability for a broader audience.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-winfim.net/issues/16", @@ -11879,10 +12275,11 @@ "pk": 413, "fields": { "nest_created_at": "2024-09-11T20:13:00.030Z", - "nest_updated_at": "2024-09-11T20:13:00.030Z", + "nest_updated_at": "2024-09-13T15:18:33.473Z", "node_id": "I_kwDOD5sfqM6BytKM", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context for users. 
To address this, a concise and informative description outlining the project's purpose, features, and usage should be created and added.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-jvmxray/issues/3", @@ -11905,10 +12302,11 @@ "pk": 414, "fields": { "nest_created_at": "2024-09-11T20:13:07.406Z", - "nest_updated_at": "2024-09-11T20:13:07.406Z", + "nest_updated_at": "2024-09-13T15:18:39.442Z", "node_id": "I_kwDOD5sGHc6BytI9", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. It suggests that including a concise overview of the project's purpose and functionality would enhance understanding for potential users and contributors. To address this, a brief and informative description should be drafted and added to the project documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-redteam-toolkit/issues/2", @@ -11931,10 +12329,11 @@ "pk": 415, "fields": { "nest_created_at": "2024-09-11T20:13:21.779Z", - "nest_updated_at": "2024-09-11T20:13:21.779Z", + "nest_updated_at": "2024-09-13T15:18:50.605Z", "node_id": "MDU6SXNzdWU2NjEwMTIyMTI=", "title": "Create OWASP conference table package", "body": "During the July Outreach Committee call we agreed that we could setup a suggested conference package that the foundation could ship in support of OWASP tables in conferences.\r\n\r\nIn order to close this ticket we need:\r\n\r\nList of suggested artwork\r\nList of suggested swag and formats\r\nA suggested relevant playbook for ordering shipping and expensing the \"conference package\" per world region.\r\nA suggested slide-deck/talking points(?)\r\nSince, to my understanding, OWASP cannot promote commercial distributors, the list of suggested distributors can be private\r\n\r\nThe purpose of this action is to enable volunteers to represent OWASP in events with tables", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-outreach/issues/8", @@ -11963,6 +12362,7 @@ "node_id": "MDU6SXNzdWU3OTY5MDk0OTg=", "title": "Fix numbering before release", "body": "We have to make sure to fix the numbering of requirements before release. ", + "summary": "The issue highlights the need to correct the numbering of requirements prior to the release. It suggests that a review and update of the current numbering system is necessary to ensure accuracy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/issues/55", @@ -11991,6 +12391,7 @@ "node_id": "MDU6SXNzdWU4MDg4MjgxODk=", "title": "Add info on how to deal with non conformities in \"using the ISVS\" chapter", "body": "The ISVS currently does not address that not implementing a security control and/or accepting a failed security control/vulnerability is a effort vs risk based decision. We could add something to the using the ISVS chapter. \r\n", + "summary": "The issue suggests enhancing the \"using the ISVS\" chapter by including guidance on managing non-conformities, particularly regarding the decision-making process for not implementing security controls or accepting vulnerabilities. It emphasizes the need for a discussion on the effort versus risk-based decisions in these scenarios. 
To address this, additional content outlining best practices or frameworks for handling such situations could be beneficial.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/issues/63", @@ -12017,10 +12418,11 @@ "pk": 418, "fields": { "nest_created_at": "2024-09-11T20:13:50.765Z", - "nest_updated_at": "2024-09-12T00:15:08.880Z", + "nest_updated_at": "2024-09-13T16:59:56.501Z", "node_id": "MDU6SXNzdWU4MTUxODk0Mzg=", "title": "Suggestions to improve consistency - Update \"Using ISVS\" Figure", "body": "There are several areas which I believe could be more consistent:\r\n\r\n1. Wi-Fi and Bluetooth\r\nThese are very specific protocols. What if the device uses LoRaWAN or Zigbee?\r\nIt could be more beneficial to separate LAN and WAN communications as the threat model is different.\r\n\r\n2. Figure used in https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/blob/master/en/Using_ISVS.md#the-isvs-security-model\r\nThe blocks used in the figure are confusing: they mix components and features (e.g. Wi-Fi or software updates) with processes (e.g. design, secure development).\r\nI would suggest to:\r\n- Regroup all processes in one box.\r\n- Simplify the device \"design\" with components and features in boxes representing a physical device. For example: \"internal hardware\", \"external hardware\", \"software\".\r\n\r\n", + "summary": "The issue discusses the need for improved consistency in the \"Using ISVS\" figure. It highlights two main areas for enhancement: \n\n1. The distinction between communication protocols, suggesting a separation of LAN and WAN communications to address different threat models.\n \n2. The organization of elements in the figure, recommending that components and processes be clearly differentiated. It suggests regrouping all processes into one box and simplifying the device design by categorizing components and features into boxes representing a physical device.\n\nTo address these points, it would be beneficial to revise the figure to enhance clarity and consistency.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/issues/65", @@ -12035,12 +12437,12 @@ "author": 177, "repository": 547, "assignees": [ - 12, - 176 + 176, + 12 ], "labels": [ - 98, - 99 + 99, + 98 ] } }, @@ -12053,6 +12455,7 @@ "node_id": "MDU6SXNzdWU4MzAxNjE2MTA=", "title": "Missing Freeze and Mix & Match attack cases", "body": "The firmware update chapter currently explicitly covers roll-back attacks. The Freeze and Mix & Match attack cases are not (explicitly) covered. \r\n- Freeze attacks: an attacker tricks the device / ecosystem in believing the device is up to date while in reality it is not. \r\n- Mix & Match attacks: an attacker supplies combinations of packages or package metadata that never existed in the upstream repository. Can occur if for example individual packages are signed, but package indexes are not. \r\n\r\nThese, together with others that we potentially overlooked, can be found here: https://theupdateframework.io/security/ \r\n\r\n", + "summary": "The issue highlights the absence of coverage for Freeze and Mix & Match attack cases in the firmware update chapter, which currently only addresses roll-back attacks. Freeze attacks involve misleading the device into thinking it is updated when it is not, while Mix & Match attacks consist of supplying non-existent combinations of packages or metadata. 
The discussion suggests reviewing additional attack scenarios listed on the provided resource link to ensure comprehensive coverage of security threats. It may be beneficial to update the documentation to include these attack cases.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/issues/80", @@ -12081,6 +12484,7 @@ "node_id": "MDU6SXNzdWU4MzQ0MTUxNDg=", "title": "L3 requirements for Bluetooth and Wifi aren't high enough", "body": "The suggestions for Bluetooth and Wifi are reasonable but for L3 I think they need to go further:\r\n\r\n- WPA3 for Wifi.\r\n- Minimum of Bluetooth 4.2 - improved security over 4.1 with Secure Connections.\r\n-- I can't find any reference to improved security for 5, 5.1 and 5.2.\r\n- Recommendations on the different pairing models. I'm still researching this, but it seems the 6 digit PIN in SSP (Secure Simple Pairing) isn't particularly strong - ~~see tools like https://github.com/mikeryan/crackle/~~ _Crackle only works on Legacy Pairing, not Secure Connections, see https://github.com/mikeryan/crackle/blob/master/FAQ.md#is-my-device-using-le-legacy-pairing-or-le-secure-connections_\r\n", + "summary": "The issue highlights that the L3 requirements for Bluetooth and Wifi need to be enhanced. Specifically, it suggests implementing WPA3 for Wifi security and setting a minimum requirement of Bluetooth 4.2, which offers better security than 4.1. Additionally, there is a call for recommendations regarding different pairing models, particularly addressing concerns about the strength of the 6-digit PIN in Secure Simple Pairing. Further research into these security aspects is necessary to improve the overall requirements. It would be beneficial to update the L3 requirements to include these enhancements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/issues/82", @@ -12110,6 +12514,7 @@ "node_id": "I_kwDODpWfz847V6-W", "title": "Detect & Response set of requirements missing", "body": "The ISVS currently does not cover security requirements related to detecting and responding to security incidents. \r\n\r\nExample requirement that's missing: Verify that an appropriate response strategy is in place in case an end device's root keys are compromised, given that root keys cannot be remotely updated. \r\n\r\nOther example of a requirement that exists but could be generalized: \r\n| **4.5.8** | Verify that users can obtain an overview of paired devices to validate that they are legitimate (for example, by comparing the MAC addresses of connected devices to the expected ones). | | | ✓ |", + "summary": "The issue highlights a gap in the current ISVS regarding the absence of security requirements focused on detecting and responding to security incidents. A specific example provided is the need to ensure an appropriate response strategy for compromised root keys, which cannot be updated remotely. Additionally, it suggests that some existing requirements, like user validation of paired devices, could benefit from being generalized. To address this, the requirements for incident detection and response need to be developed and integrated into the ISVS framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/issues/86", @@ -12136,6 +12541,7 @@ "node_id": "I_kwDODpWfz849BY3K", "title": "V2: Missing anti-bruteforce & reauthentication requirements in authentication section", "body": "Hi! 
\r\n\r\nI noticed that V2 doesn't currently contain any requirements for: \r\n\r\n- anti-bruteforce protection of authentication mechanisms \r\n- re-authentication in regular intervals (so that users and/or devices do not remain authenticated forever). \r\n\r\nDo you think it would be a good idea to add these? Some example (draft) requirements could be: \r\n\r\n- Verify that authentication mechanisms have sufficient protection against brute force attacks.\r\n- Verify that re-authentication of users and devices is required at regular intervals. The interval duration should depend on the security criticality of the application or functionality. ", + "summary": "The issue highlights the absence of anti-bruteforce protection and reauthentication requirements in the V2 authentication section. It suggests that these elements should be included to enhance security. Proposed requirements include ensuring authentication mechanisms are robust against brute force attacks and mandating regular re-authentication based on the application's security needs. It would be beneficial to consider adding these requirements to strengthen the authentication framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/issues/88", @@ -12162,6 +12568,7 @@ "node_id": "I_kwDODpWfz85CVeDF", "title": "3.2.9\t RAM scrambling?", "body": "Hi,\r\n\r\n3.2.9 on OS configuration says that one should:\r\n Verify the embedded OS provides protection against unauthorized access to RAM (e.g. RAM scrambling).\r\n\r\nI'm somewhat confused about this as I thought RAM scrambling was a transparent hardware security measure. Could u please list some examples on OS's where you can effectively configure RAM scrambling or similar measures? Is there anything in Linux for that? \r\n\r\nThanks,\r\nAttila\r\n\r\n", + "summary": "The issue raises a question about the requirement in OS configuration 3.2.9 regarding RAM scrambling and its role in protecting against unauthorized access to RAM. The user seeks clarification on whether RAM scrambling is a hardware feature and requests examples of operating systems that allow for effective configuration of RAM scrambling or similar security measures, particularly looking for Linux solutions. To address this, the issue could benefit from providing specific examples and guidance on configuring RAM scrambling in various operating systems, including Linux.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoT-Security-Verification-Standard-ISVS/issues/89", @@ -12188,6 +12595,7 @@ "node_id": "I_kwDODhZs8M6DQdbY", "title": "Update: 07-implementation/00-toc to include a section on SAST", "body": "**Describe what change you would like** : \r\nIt would be good to have a section on static analysis in the implementation section, and add semgrep to the new section\r\n\r\n**Context** : \r\nSection: 07-implementation/00-toc\r\n\r\nsemgrep: https://semgrep.dev/docs/getting-started/quickstart-oss/\r\n\r\n", + "summary": "The issue requests the addition of a section on static analysis in the implementation section, specifically within the 07-implementation/00-toc. The suggestion includes incorporating information about semgrep, a static analysis tool. 
It would be beneficial to update the table of contents to reflect this new section.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-developer-guide/issues/179", @@ -12212,10 +12620,11 @@ "pk": 425, "fields": { "nest_created_at": "2024-09-11T20:14:55.441Z", - "nest_updated_at": "2024-09-11T20:14:55.441Z", + "nest_updated_at": "2024-09-13T15:19:55.206Z", "node_id": "I_kwDODhZs8M6HbWkI", "title": "Create OpenAI Agent backed by this repo & the Wayfinder", "body": "**Describe what content should be added** : \r\n\r\n\r\nCreate an OpenAI chatbot backed by an OS LLM trained with the knowledge from this repo, the Wayfinder project (OWASP Integration Standards), SKF (?)\r\n\r\n**Context** : \r\nAll sections \r\n\r\n\r\n", + "summary": "The issue requests the creation of an OpenAI chatbot that utilizes an open-source language model (LLM) trained with knowledge from a specific repository and the Wayfinder project, which relates to OWASP Integration Standards. It suggests that the implementation should encompass all sections of the existing content. To proceed, further exploration of the current repository and integration of the relevant knowledge from Wayfinder and SKF may be necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-developer-guide/issues/196", @@ -12248,6 +12657,7 @@ "node_id": "I_kwDODhZs8M6PN9Sy", "title": "Provide cross references to cornucopia", "body": "**Describe what content should be added** : \r\nIt would be good to link across to the cornucopia cards from the checklist\r\nThe first step to cross referencing to cornucopia is provide identifiers to the checklist (done in #225 )\r\nthen the checklists can link across to the cornucopia cards API\r\n\r\n**Context** : \r\nSection: 06-design/02-web-app-checklist/\r\n\r\nFrom @sydseter 's comment in Threat dragon issue #140 '[Integrate threat engine with Cornucopia / EoP cards](https://github.com/OWASP/threat-dragon/issues/140)', we have : \r\nFull yaml source can be found at https://github.com/OWASP/cornucopia/tree/master/source\r\nWe also have endpoints to the OWASP Cornucopia Website App and Mobile App editions.\r\n\r\nOWASP Cornucopia Website App Edition:\r\n\r\nhttps://copi.securedelivery.io/cards/2.00/DV2\r\nhttps://copi.securedelivery.io/cards/2.00/AC2\r\nhttps://copi.securedelivery.io/cards/2.00/SM2\r\nhttps://copi.securedelivery.io/cards/2.00/AZ2\r\nhttps://copi.securedelivery.io/cards/2.00/CR2\r\nhttps://copi.securedelivery.io/cards/2.00/CO2\r\n\r\nOWASP Cornucopia Mobile App Edition:\r\n\r\nhttps://copi.securedelivery.io/cards/1.00/CO2\r\nhttps://copi.securedelivery.io/cards/1.00/PC2\r\nhttps://copi.securedelivery.io/cards/1.00/AA2\r\nhttps://copi.securedelivery.io/cards/1.00/NS2\r\nhttps://copi.securedelivery.io/cards/1.00/RS2\r\nhttps://copi.securedelivery.io/cards/1.00/CRM2\r\nhttps://copi.securedelivery.io/cards/1.00/COM2\r\n", + "summary": "The issue discusses the need to create cross-references between the checklist and the Cornucopia cards. The first step involves adding identifiers to the checklist, which has already been addressed in a previous issue. 
Following this, the checklists should be linked to the Cornucopia cards API for better integration and accessibility.\n\nTo move forward, it will be necessary to implement the linking mechanism between the checklist and the Cornucopia cards using the provided API endpoints.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-developer-guide/issues/231", @@ -12277,6 +12687,7 @@ "node_id": "I_kwDODhZs8M6QTlxM", "title": "Translate DevGuide to Spanish", "body": "**Describe what change you would like** : \r\nThe Developer Guide needs translations, so it would be great if the DevGuide could be translated\r\nSpanish is the first choice because of its wide use across countries/continents\r\n\r\n**Context** : \r\nSection: release-es/*\r\n\r\nContact @jgadsden for advice on how to approach this", + "summary": "The issue requests the translation of the Developer Guide into Spanish due to its extensive use across various regions. It suggests focusing on the release-es/* sections and indicates that advice can be sought from a specified contact for guidance on how to proceed with the translation effort.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-developer-guide/issues/239", @@ -12292,8 +12703,8 @@ "repository": 563, "assignees": [], "labels": [ - 102, - 107 + 107, + 102 ] } }, @@ -12306,6 +12717,7 @@ "node_id": "I_kwDODhZs8M6QVeyd", "title": "Translations to other langauges", "body": "**Describe what change you would like** : \r\nThe Developer Guide needs translations to reach a wider audience. Suggested languages are:\r\n\r\n- Arabic 'العربية'\r\n- German 'Deutsch'\r\n- Greek 'Ελληνικά'\r\n- English\r\n- Finnish 'Suomi'\r\n- French 'Français'\r\n- Hindi 'हिंदी'\r\n- Bahasa Indonesia\r\n- Japanese '日本語'\r\n- Malay\r\n- Chinese '中文'\r\n\r\nSpanish translation is subject of separate issue #239 \r\nPortuguese 'Português' - translation subject of separate issue #267 \r\n\r\n**Context** : \r\nSection: all sections\r\n\r\n", + "summary": "The issue highlights the need for translating the Developer Guide into multiple languages to make it accessible to a broader audience. The suggested languages include Arabic, German, Greek, Finnish, French, Hindi, Bahasa Indonesia, Japanese, Malay, and Chinese. It notes that Spanish and Portuguese translations are being addressed in separate issues. To move forward, translations for the listed languages should be prioritized.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-developer-guide/issues/240", @@ -12321,8 +12733,8 @@ "repository": 563, "assignees": [], "labels": [ - 102, - 107 + 107, + 102 ] } }, @@ -12335,6 +12747,7 @@ "node_id": "I_kwDODhZs8M6R4zQ-", "title": "Add OWASP MASWE", "body": "**Describe what change you would like** : \r\nThere is a new enumeration for the MAS project, [MASWE](https://mas.owasp.org/MASWE)\r\nIt would be good to provide a page on this\r\n\r\n**Context** : \r\nSection: not sure\r\n", + "summary": "The issue requests the addition of a new enumeration for the OWASP MAS project, specifically for MASWE. The suggestion is to create a dedicated page to provide information about this enumeration. It is unclear which section of the project this should be added to, indicating that further clarification may be needed. 
To address this, the appropriate section for the new page should be determined and the content for MASWE created.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-developer-guide/issues/246", @@ -12365,6 +12778,7 @@ "node_id": "I_kwDODhZs8M6TTjZv", "title": "Add OWASP OpenCRE from Wayfinder project", "body": "**Describe what change you would like** : \r\nThe Open Common Requirement Enumeration, OpenCRE, is part of the Wayfinder project and it would be good to have a page in the Developer Guide for this\r\n\r\n**Context** : \r\nSection: new section `05-requirements/03-opencre.md`\r\n`05-requirements/03-skf.md` moved to `05-requirements/07-skf.md`\r\n", + "summary": "A request has been made to include a new page in the Developer Guide for the Open Common Requirement Enumeration (OpenCRE) from the Wayfinder project. This would involve creating a new section titled `05-requirements/03-opencre.md` and relocating the existing `05-requirements/03-skf.md` to `05-requirements/07-skf.md`. It is suggested that the necessary documentation and content for OpenCRE be developed and added accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-developer-guide/issues/258", @@ -12396,6 +12810,7 @@ "node_id": "I_kwDODhZs8M6U2T8-", "title": "Translate DevGuide to Portuguese Brazil", "body": "**Describe what change you would like** : \r\nProvide the DevGuide translation to Brazil Portuguese\r\nA new fileset has been created under `release-pt-br` and the ToC created in `_data/release-pt-br.yaml`\r\n\r\n**Context** : \r\nSection: `release-pt-br` and `_data/release-pt-br.yaml`\r\n", + "summary": "The issue requests the translation of the DevGuide into Brazilian Portuguese. A new fileset has been established under `release-pt-br`, and the Table of Contents is available in `_data/release-pt-br.yaml`. To address this, the necessary translation work needs to be completed for the DevGuide.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-developer-guide/issues/267", @@ -12427,6 +12842,7 @@ "node_id": "MDU6SXNzdWU1NTk3ODMzMzc=", "title": "OWASP Fragmentation Document ", "body": "**What is required?**\r\nDeliver a document showcasing how the OWASP projects are fragmented across the SDLC, what are some best practices to implementing the tools, and what are some bad use cases.", + "summary": "The issue requests the creation of a document that highlights the fragmentation of OWASP projects throughout the Software Development Life Cycle (SDLC). It should include best practices for implementing these tools as well as examples of poor use cases. To address this, a comprehensive analysis of OWASP tools and their integration into various stages of the SDLC is needed, along with practical recommendations and cautionary examples.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-integration-standards/issues/4", @@ -12453,6 +12869,7 @@ "node_id": "MDU6SXNzdWU1NTk3ODU4MDM=", "title": "Projects Integration Discussions with Project Leaders", "body": "**What is required?**\r\nDiscuss with the major projects that are interested in integrating with other projects and see how they would benefit best from other projects, and what they can deliver for other projects as well.", + "summary": "The issue highlights the need for discussions with major projects interested in integration. The goal is to explore mutual benefits and contributions between these projects. 
To move forward, it is suggested to initiate conversations with project leaders to identify potential collaboration opportunities and define how each project can support the other.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-integration-standards/issues/6", @@ -12479,6 +12896,7 @@ "node_id": "MDU6SXNzdWU1NjE1OTE4NjE=", "title": "Create Roadmap", "body": "**Summary:** Create a one-page document listing where this project is going.\r\n**Current State:** An external observer cannot find goals and targets of the project\r\n**Acceptance Criteria:** There is a one page document listing deliverables and problems to be tackled for the next semester", + "summary": "The issue requests the creation of a one-page roadmap document that outlines the project's goals and targets. Currently, it is difficult for external observers to understand the project's direction. The completion of this task requires a concise document that lists the deliverables and challenges to be addressed in the upcoming semester.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-integration-standards/issues/7", @@ -12505,6 +12923,7 @@ "node_id": "MDU6SXNzdWU1NjE2MzUyNzk=", "title": "Automated Way of Showing Info about OWASP Projects", "body": "**Summary**: OWASP has a ton of projects which are very fragmented across both org repos and random github repos. Each project performs it's own tasks but there's no centralized way of finding what each project does and how it connects to the rest.\r\nThis can be solved by adding a small metadata file to each project root.\r\nThen we can use Github's api to either search for specific strings in the metadata files or, grab the metadata files from the owasp org and statically map owasp projects not under the org.\r\nFrom there a script can extract metadata and populate a template with:\r\n\r\n- State of SDLC each project fits in,\r\n- Links to github and docs\r\n- Builders Breakers Defenders designation\r\n\r\nThis allows central presentation of all owasp projects and ease of finding new ones.\r\n\r\nMetadata file format TBD.\r\n\r\n**Acceptance Criteria:** \r\n\r\n* There's an automated way to gather info about owasp projects and presenting them.\r\n* There is a way to see info about flagship projects in one place.\r\n", + "summary": "The issue highlights the fragmentation of OWASP projects across various repositories, making it difficult to understand their functions and interconnections. A proposed solution involves adding a metadata file to each project root, which can be utilized alongside GitHub's API to gather information on all OWASP projects, including those not under the organization’s main repository. The extracted metadata would include details such as the SDLC stage of each project, relevant links, and designations like Builders, Breakers, and Defenders. 
\n\nTo move forward, the format for the metadata file needs to be determined, and an automated system for collecting and presenting this information should be developed, ensuring flagship projects are easily accessible in one location.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-integration-standards/issues/8", @@ -12533,6 +12952,7 @@ "node_id": "MDU6SXNzdWU2ODY5OTQ1NjU=", "title": "Projects Update in the Diagram", "body": "Requirements:\r\n- SCVS\r\n\r\nImplementation -> Secure Libraries:\r\n- OWASP Java Encoder\r\n- OWASP Java HTML Sanitizer\r\n\r\nVerification -> Tools:\r\n- OWASP O-Saft\r\n\r\nOperations:\r\n- Dependency Track\r\n\r\nEdit 1: Add O-Saft", + "summary": "The issue discusses updates needed in a project diagram, specifically focusing on the Secure Libraries and Verification sections. The requirements include implementing the OWASP Java Encoder and OWASP Java HTML Sanitizer for secure libraries. For verification, the addition of OWASP O-Saft is requested, along with the mention of Dependency Track in operations. An edit indicates the necessity to add O-Saft to the diagram. It appears that the next steps involve incorporating these elements into the project diagram accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-integration-standards/issues/24", @@ -12559,6 +12979,7 @@ "node_id": "MDU6SXNzdWU3MTMwODk0MzE=", "title": "add a csv/google sheets front end to the cre rest api", "body": "There's currently no way to import/export data to/from the REST API.\r\nThis ticket has two parts:\r\n* Given a REST call, add the ability to export the results into a google sheets spreadsheet which allows humans to get the mappings into a CSV format. This is already partly implemented (utils/spreadsheet_utils.py) but we haven't implemented any API plumbing for it.\r\n\r\n* There's a way for Authorissed users to submit a spreadsheet that follows the [template](https://docs.google.com/spreadsheets/d/1f47aZydJ47n-iGb0fkmu880wSaFyYDM-zdkgs6oMV7I/edit#gid=1841715588) and import mappings from it.\r\nThis involves some openidconnect integration and some way of allowing which users can submit mappings + plumbing for the parsers to receive a spreadsheet url. This functionality already exists via the cmd api but haven't managed to plumb the openid connect stuff.", + "summary": "The issue highlights the need to enhance the existing REST API by adding functionality for importing and exporting data through CSV or Google Sheets. It consists of two main tasks: \n\n1. Implementing the capability to export results from a REST call into a Google Sheets format, utilizing the partially developed features in `utils/spreadsheet_utils.py`, and establishing the necessary API integration.\n \n2. Enabling authorized users to submit spreadsheets that follow a specified template for importing mappings. 
This requires integrating OpenID Connect for user verification, along with the necessary infrastructure to process the submitted spreadsheet URLs and connect to the existing command-line API functionality.\n\nTo address the issue, the focus should be on completing the API integrations and ensuring proper user authorization for spreadsheet submissions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-integration-standards/issues/25", @@ -12585,6 +13006,7 @@ "node_id": "I_kwDODUeCys48d35_", "title": "Consider to use the 'Open Security Information Base' as OWASP driven ID", "body": "The OWASP Top10 team has developed an open structure to store information about OWASP projects and links to external IT-security standards: 'Open Security Information Base'. Regarding the discussions with @robvanderveer we have compiled a YAML structure accepting names and numbers (as alias) to browse through the structure.\r\nAn alpha structure, showing an example for ASVS 4.0.2 is on OWASP's slack channel ['# project-integration'](https://owasp.slack.com/archives/CPMEWT342/p1633279131013100), for now.\r\nWe should consider to bring together this development with the CRE project.", + "summary": "The issue suggests integrating the 'Open Security Information Base' with the CRE project, as developed by the OWASP Top10 team. A YAML structure has been created to organize information about OWASP projects and relevant IT-security standards. An example structure related to ASVS 4.0.2 is available on OWASP's Slack channel. The next steps would involve assessing the potential for collaboration between these two initiatives.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-integration-standards/issues/43", @@ -12607,10 +13029,11 @@ "pk": 439, "fields": { "nest_created_at": "2024-09-11T20:16:21.820Z", - "nest_updated_at": "2024-09-11T20:16:21.820Z", + "nest_updated_at": "2024-09-13T03:57:34.405Z", "node_id": "I_kwDODUeCys52RKj2", "title": "Rename 'Policy Gap Evaluation' in the Wayfinder diagram?", "body": "The [OWASP Developer Guide](https://owasp.org/www-project-developer-guide/) uses the Wayfinder diagram as the structure of the document, so that sections are Requrements, Design etc. One of the sections is 'Policy Gap Evaluation', because of this area of the Wayfinder diagram.\r\n\r\nWhat does 'Policy Gap Evaluation' actually refer to? After some thought it seems to be concerned with 'Security Gap Analysis' used in management systems such as ISO 27001, so the Developer Guide has renamed 'Policy Gap Evaluation' to 'Security Gap Analysis'.\r\n\r\nCould the Wayfinder diagram have 'Policy Gap Evaluation' renamed to 'Security Gap Analysis', but if I have this wrong the Developer Guide section could be named back to 'Policy Gap Evaluation' to align with the Wayfinder diagram.", + "summary": "The issue discusses the need to clarify and potentially rename the 'Policy Gap Evaluation' section in the Wayfinder diagram, as it is currently causing confusion. It suggests that this term may be more accurately represented as 'Security Gap Analysis', aligning with terminology used in the OWASP Developer Guide. 
The proposed action is to either rename the Wayfinder diagram section to 'Security Gap Analysis' or revert the Developer Guide section back to 'Policy Gap Evaluation' for consistency.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-integration-standards/issues/51", @@ -12633,10 +13056,11 @@ "pk": 440, "fields": { "nest_created_at": "2024-09-11T20:16:59.421Z", - "nest_updated_at": "2024-09-11T20:16:59.421Z", + "nest_updated_at": "2024-09-13T15:21:20.364Z", "node_id": "I_kwDODQO5YM5FJXdo", "title": "Page Must Remove Wiki References", "body": "The wiki should no longer be referenced. Please either move content to these pages or else remove the content. Be advised, regarding the wiki content, I received the following ticket:\r\n------------------\r\nHi , \r\nUnder the link https://wiki.owasp.org/index.php/OWASP_Internet_of_Things_Project#tab=Firmware_Analysis\r\n\r\nIn the table:\r\nOn the \"Device Firmware Tools\" row: \r\nİtem: \r\nBinwalk firmware analysis tool ----> https://binwalk.org/ \r\n\r\nleads to bitcoin gambling website \r\n\r\nI checked this with multiple DNS servers. Firefox DOH, Google 8.8.8.8. I think it is not DNS poisoning.\r\n\r\n------------------\r\n\r\nThe above is what led me to find that you are referencing outdated content as I WAS going to direct them here for up-to-date content.", + "summary": "The issue highlights the need to remove references to the wiki across the project due to outdated content, specifically pointing out that a link to a firmware analysis tool leads to an unrelated website. It suggests either relocating relevant content to the project's pages or eliminating it entirely. Action is required to update or remove the wiki references to ensure accuracy and relevance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-internet-of-things/issues/8", @@ -12665,6 +13089,7 @@ "node_id": "MDU6SXNzdWU1OTUzMTY1ODE=", "title": "sim swapping", "body": "I read more and more articles about the dangers of sim swapping. Would be nice to have some guidelines on how to prevent such attacks.\r\n\r\nExample article https://www.vice.com/en_us/article/pke9zk/paypal-and-venmo-are-letting-sim-swappers-hijack-accounts", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/103", @@ -12696,6 +13121,7 @@ "node_id": "MDU6SXNzdWU3MTU0ODIxNTM=", "title": "Broken Links", "body": "Was going through pages, found plenty of broken references. Ran a [broken URL checker](https://www.deadlinkchecker.com/website-dead-link-checker.asp) against the website, found a lot of broken URLs. Fixed some of them in #290 . 
Some URLs remain broken:\r\n\r\n```\r\nhttps://support.google.com/mail/forum/AAAAK7un8RU3J3r2JqFNTw/discussion/?hl=en&gpf=d/topic/gmail/3J3r2JqFNTw/discussion\r\nhttps://www.javaworld.com/javaworld/javaqa/2003-05/01-qa-0509-jcrypt.html?page=2\r\nhttp://www.php-security.org/downloads/rips.pdf\r\nhttp://www.seclab.tuwien.ac.at/papers/pixy.pdf\r\nhttp://w2spconf.com/2010/papers/p27.pdf\r\nhttps://www.codemagi.com/blog/post/194\r\nhttps://www.itu.int/rec/T-REC-X.690-200811-I/en\r\nhttps://www.ietf.org/id/draft-ietf-websec-key-pinning-09.txt\r\nhttps://github.com/andresriancho/w3af/blob/master/plugins/grep/csp.py\r\nhttp://blog.php-security.org/archives/76-Holes-in-most-preg_match-filters.html\r\nhttp://www.webapptest.org/ms-access-sql-injection-cheat-sheet-EN.html\r\nhttp://labs.idefense.com/intelligence/vulnerabilities/display.php?id=77\r\nhttp://www.ruxcon.org.au/files/2008/Attacking_Rich_Internet_Applications.pdf\r\nhttp://yehg.net/lab/pr0js/files.php/inspath.zip\r\nhttp://yehg.net/lab/pr0js/files.php/php_brute_force_detect.zip\r\nhttp://www.comptechdoc.org/independent/web/cgi/ssimanual/ssiexamples.html\r\nhttp://www.iss.net/security_center/advice/Exploits/TCP/session_hijacking/default.htm\r\nhttp://www.derkeiler.com/pdf/Mailing-Lists/Securiteam/2002-12/0099.pdf\r\nhttp://archives.neohapsis.com/archives/bugtraq/2002-05/0118.html\r\nhttp://hacker-eliminator.com/trojansymptoms.html\r\nhttp://www.microsoft.com/technet/security/bulletin/MS00-078.mspx\r\nhttps://www.checkmarx.com/Demo/XSHM.aspx\r\nhttps://blog.watchfire.com/wfblog/2008/06/javascript-code.html\r\nhttp://shlang.com/netkill/netkill.html\r\nhttps://cirt.net/code/nikto.shtml\r\nhttps://addons.mozilla.org/en-US/firefox/addon/heartbleed-checker/\r\nhttps://www.ecrimelabs.com/tools/webroot/WebRoot.txt\r\nhttps://www.cs.rice.edu/~scrosby/hash/slides/USENIX-RegexpWIP.2.ppt\r\nhttps://www.checkmarx.com/NewsDetails.aspx?id=23&cat=3\r\nhttps://owasp.org/index.php/Dhiraj_Mishra\r\nhttp://puzzlemall.googlecode.com/files/Session\r\nhttps://owasp.org/index.php/Image:RequestRodeo-MartinJohns.pdf\r\nhttp://windows.stanford.edu/docs/IISsecchecklist.htm\r\nhttp://www.net-security.org/dl/articles/php-file-upload.pdf\r\nhttp://www.windowsitpro.com/Files/18/27072/Webtable_01.pdf\r\nhttps://www.imperva.com/404?aspxerrorpath=/application_defense_center/glossary/forceful_browsing.html\r\nhttp://info.sen.ca.gov/pub/01-02/bill/sen/sb_1351-1400/sb_1386_bill_20020926_chaptered.html\r\nhttps://blog.shapesecurity.com/heartbleed-bug-places-encrypted-user-data-and-webservers-at-risk\r\nhttps://www.mitre.org/sites/default/files/publications/pr-18-2417-deliver-uncompromised-MITRE-study-8AUG2018.pdf\r\nhttp://www.microsoft.com/technet/security/bulletin/ms04-028.mspx\r\nhttp://www.digitaldwarf.be/products/mangle.c\r\nhttp://projects.info-pull.com/mokb/\r\nhttp://www.bonsai-sec.com/en/research/untidy-xml-fuzzer.php\r\nhttps://support.snyk.io/snyk-cli/how-can-i-set-a-snyk-cli-project-as-open-source\r\nhttp://www.rubcast.rub.de/index2.php?id=1009\r\nhttp://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-53r4.pdf\r\nhttp://aeditor.rubyforge.org/ruby_cplusplus/index.html\r\nhttps://owasp-skf.gitbook.io/asvs-write-ups/filename-injection\r\nhttp://tomcat.apache.org/tomcat-6.0-doc/config/context.html\r\nhttps://blog.48bits.com/2010/09/28/iis6-asp-file-upload-for-fun-and-profit/\r\nhttp://palisade.plynt.com/issues/2006Jun/injection-stored-procedures/\r\nhttp://www.bindshell.net/tools/odysseus\r\nhttp://www.ntobjectives.com/products/firewater/\r\nhttp://home.intekom.c
om/rdawes/exodus.html\r\nhttp://www.wastelands.gen.nz/odysseus/index.php\r\nhttp://www.webcohort.com/web_application_security/research/tools.html\r\nhttp://www.rsasecurity.com/standards/ssl/basics.html\r\nhttp://palisade.plynt.com/issues/2005Aug/page-tokens/\r\nhttp://www.microsoft.com/mspress/books/toc/5612.asp\r\nhttp://www.seczone.cn/2018/06/27/codesec源代码安全检测平台/\r\n```\r\n\r\nIf anyone wants to go through these, `grep --color=always -nr -Ff broken_urls_left.txt|grep --color=always -v \"broken_\"|sort` will show where those URLs are specifically (might miss some of these, though). Could probably also find a lot of broken internal references by looking for `\"wikilink\"`.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/292", @@ -12726,6 +13152,7 @@ "node_id": "MDU6SXNzdWU3Mzg2MTMyNTc=", "title": "HttpOnly needs updates", "body": "I just finished dealing with `auto-migrated` issues for this article, it could definitely use some content updates:\r\nhttps://github.com/OWASP/www-community/blob/master/pages/HttpOnly.md it still talks about old versions of IE and Opera. \r\n\r\nThis article includes an extensive table that needs re-working after the auto-migration as well (which I did not tackle).\r\n\r\nIs Opera even relevant in 2020? Do we still care about IE with Edge/Edge Chrome?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/314", @@ -12757,6 +13184,7 @@ "node_id": "I_kwDODQOsk84_TJQD", "title": "Suggested CSV Injection mitigation does not survive saving and re-opening in Excel", "body": "https://github.com/OWASP/www-community/blob/master/pages/attacks/CSV_Injection.md\r\n\r\nExcel is commonly used to edit CSV files. Unfortunately when saving CSVs Excel strips out some of the characters which are inserted to prevent the CSV injection. This is unfortunate behaviour from Excel, and should really be fixed there, but I'd like to be able to prevent formulas from being inserted into CSVs and run on my user's computers.\r\n\r\nFor most outputs it's possible to completely disallow cells starting with \"=\", and \"@\", irrespective of quoting. But \"-\" is obviously required for numbers.\r\n\r\nOne suggestion for solving this is inserting an extra tab character, which prevents Excel from removing the quotes.\r\n\r\nhttp://georgemauer.net/2017/10/07/csv-injection.html\r\n\r\nReproduction:\r\n\r\nConsider the following CSV:\r\n```csv\r\na,b\r\n,\"'=1+2\"\r\n```\r\n\r\nOpen the CSV, focus on the cell with the formula, and then move the focus away. Save the CSV, it is saved as:\r\n```\r\na,b\r\n,=1+2\r\n```\r\n\r\nOpen the CSV again, the formula is executed and \"3\" is shown in the cell.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/517", @@ -12783,6 +13211,7 @@ "node_id": "I_kwDODQOsk85f89NH", "title": "\"Poor Logging Practice\" page is incomplete", "body": "The page \"[Poor Logging Practice](https://owasp.org/www-community/vulnerabilities/Poor_Logging_Practice)\" is incomplete and is missing information:\r\n- \"Risk Factors\" says \"TBD\"\r\n- A lot of sections contain dummy values, e.g. 
\"Examples\", \"Related Attacks\", ...\r\n- The sections say \"good practice\" / \"poor practice\" without ever explaining _why_ it is considered good or bad; this is not very helpful\r\n- Given that this is an OWASP page, and the URL path even includes `.../vulnerabilities/...`, the page never properly mentions what the security aspects are. The only vague security related statement it contains is:\r\n > It can also cause log messages accidentally returned to the end users, revealing internal information to attackers.\r\n \r\n > When the use of system output streams is jumbled together with the code that uses loggers properly, the result is often a well-kept log that is missing critical information. In addition, using system output streams can also cause log messages accidentally returned to end users, revealing application internal information to attackers.\r\n \r\n (I am not completely sure though what \"cause log messages accidentally returned to the end users\" is supposed to mean here, is that about log injection (that has its [own page already](https://owasp.org/www-community/attacks/Log_Injection))?)\r\n\r\nIf there are really security aspects to this, then it would be good if they are more clearly highlighted (especially for non-`static final` loggers and multiple loggers). Otherwise if this is just general programming advice maybe this should not be part of the OWASP articles?\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/734", @@ -12809,6 +13238,7 @@ "node_id": "I_kwDODQOsk85-u1K1", "title": "Add to \"Changing Registered Email Address For An Account\" page to describe guidelines for System accounts with multiple email addresses", "body": "> The other thing that bothers me about this - and it absolutely depends on context, what type of app, etc - this flow seems to assume/expect there to be only a single, authoritative email address for the user. Not just identification, but the identity itself. Contrast that to apps (e.g. like Github) wherein you can add multiple email addresses to a single user account. It is an architectural consideration to be sure, but that changes a lot of perspective as well. \r\n> E.g. just adding a 2nd email address to an existing account, you would simply reauthenticate the user, and then send notification-only email to all the other existing addresses on the account (as you do note that Google does), with an option to react or cancel (eg something like \"If this wasnt you, let us know\"). \r\nAnd cannot remove an address or change the primary, until the 2nd is approved. \r\n>\r\n> I would like to make a suggestion, to review other large / reputable / reliable sites to compare this proposed flow with what they actually do, and their existing threat model for that flow. E.g. you mentioned Google, I mentioned Github - worth digging in a bit. My assertion is that we'll find that very few actually go through all this. 
\r\nThat said, to repeat my earlier comment: it really depends on context and type of app :-)\r\n\r\n_Originally posted by @avidouglen in https://github.com/OWASP/www-community/issues/843#issuecomment-1878447314_\r\n ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/874", @@ -12835,6 +13265,7 @@ "node_id": "I_kwDODQOsk85_mgs8", "title": "device cookie lockout list storage advice?", "body": "On the device cookies idea: https://owasp.org/www-community/Slow_Down_Online_Guessing_Attacks_with_Device_Cookies\r\n\r\n\r\nHow would you store the lockout list entries?\r\n\r\nThe individual entries expire, no?\r\n\r\nPlus you might want a quick reference to \"device cookie\" -> banned(bool), or \"IP\" -> limited(bool), or \"username\" -> limited(bool)\r\n\r\nIs it meant to be stored as an in-memory KV? or stored in Redis for clustering?\r\n\r\nWhat would an ideal table layout be?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/885", @@ -12861,6 +13292,7 @@ "node_id": "I_kwDODQOsk85_zRGz", "title": "Broken images on Session Fixation Page", "body": "Image links are broken on https://github.com/OWASP/www-community/blob/master/pages/attacks/Session_fixation.md\r\nURL: https://owasp.org/www-community/attacks/Session_fixation", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/888", @@ -12892,6 +13324,7 @@ "node_id": "I_kwDODQOsk86BUlda", "title": "Combining Password Hash Plus Limited Preimage Data To Improve Password Hash Security", "body": "Hi All,\r\n\r\nI am posting this question here with the hope that web security pros can chime in. \r\n\r\nThe question was prompted by [my effort to deal](https://github.com/jetty/jetty.project/issues/11489) with weak password hashing in the Jetty application container's default web app security options.\r\n\r\n- Password hashes protect against plain passwords, meaning access to a credential database doesn't automatically mean the ability to use those credentials to access corresponding accounts. \r\n\r\n- In doing so however, we are then faced with the issue of pre-computed hashes, whose preimage string could be used to access user accounts even when a user's actual password differs, because of hash collision. There are rainbow tables with such pre-computed hashes.\r\n\r\n- The mitigations are now a number of hashing schemes intended to make it expensive for an attacker to compute these hashes thus dissuade someone from trying.\r\n\r\n**_In addition to existing mitigations, has there been any consideration given to the idea of pairing hashes with limited password data to counteract hash exploits?_** \r\n\r\nWhat if the option is added to include a limited set of randomly extracted characters from the source password (preimage) along with the position of said characters, then store that information along with the hash. In other words, in addition to what we do now: MD5:PASSWORD_HASH (jetty example), the developer would optionally append one or more sequences of: **PasswordCharacterPasswordCharacterPosition**.\r\n\r\nBelow is an example of what the resulting password hash entry could look like. 
The hash length is fixed so we know the **t** at the end is the first source password(preimage) data character, the integer after is the position of that character within the source password, the **:** is a delimiter for the limited password data sequence.\r\n\r\n`MD5:3bcc3177cb5efa4d7341d42af8f06418t2:%7`\r\n\r\nWhen a user attempts to authenticate, you validate the hash per usual then additionally validate that those randomly saved password characters are indeed present in the password at the corresponding positions. This would largely nuke the utility of rainbow tables since exploiting hashes is now coupled with the need to in essence know something about the actual source password that was used to create it.\r\n\r\nOf course the password data does reveal some bit of information about the password for anyone who has access to the database, but with a strong enough password this should be a non-issue I think (correct me on this). I imagine there is some scheme that can indicate by how much a password is weaken if you reveal characters and their corresponding position?\r\n\r\n\r\nNow rubbing my Darwin beard as I wait for the take down :)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/901", @@ -12918,6 +13351,7 @@ "node_id": "I_kwDODQOsk86CKkHQ", "title": "Community Events page has a difficult to understand format", "body": "The code that generates a list of community events from Meetup:\r\n\r\npages/social/community_events.md\r\n\r\ngenerates output like:\r\n\r\n![image](https://github.com/OWASP/www-community/assets/5029993/0b09a652-4461-436c-a141-bb30ebad6d21)\r\n\r\nThis is confusing, as the end time is not 4 pm, but another time entirely. This is actually saying the time zone in the image. \r\n\r\nCould the time zone be separated out and made more obvious?\r\n\r\nTime: 10:30 (-4 America/Toronto) \r\n\r\nor just\r\n\r\nTime: 10:30 am (America/Toronto)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/905", @@ -12933,8 +13367,8 @@ "repository": 596, "assignees": [], "labels": [ - 108, - 112 + 112, + 108 ] } }, @@ -12943,10 +13377,11 @@ "pk": 451, "fields": { "nest_created_at": "2024-09-11T20:17:22.147Z", - "nest_updated_at": "2024-09-11T20:17:22.147Z", + "nest_updated_at": "2024-09-13T03:58:11.863Z", "node_id": "I_kwDODQOsk86V3tch", "title": "Would OWASP be interested in publishing a guide on how to do cross-organization mTLS?", "body": "I've been working with cross-organization mTLS for quite a while and the standard guidance (just do whatever you want) is remarkably terrible. \r\n\r\nWould OWASP be interested in publishing a guide on how to do it right that focuses on security, operations, and not emailing certificates around?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-community/issues/991", @@ -12973,6 +13408,7 @@ "node_id": "I_kwDODQOii85DhMco", "title": "Regional Chapter Naming", "body": "From: John DiLeo @johndileo\r\n\r\nWhat is the intended meaning of the statement \"Branches should follow the naming rules of City chapters\"? Is it intended that branches have no naming to indicate they're part of a Regional Chapter, and that their naming make them indistinguishable from City Chapters? It has been our convention to include each branch's city after the name of the chapter (e.g., \"OWASP New Zealand - Auckland\"). 
If I'm reading the proposed policy correctly, we would be required to rename our branches to \"OWASP Auckland,\" etc., eliminating their identity as part of our Regional Chapter.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/57", @@ -13003,6 +13439,7 @@ "node_id": "I_kwDODQOii85DhNnx", "title": "Chapter and Branch Leader Limits", "body": "From: John DiLeo @johndileo\r\n\r\nPlease clarify the limits on Regional Chapter and Regional Branch leaders' recognition as OWASP Leaders. Is it intended that OWASP grant leader benefits to up to five (5) Regional Chapter Leaders, plus up to five (5) branch leaders, as the draft policy seems to state?\r\n\r\nIf 'yes,' I recommend adjusting the Branch Leader limit to the greater of five (5), or one (1) per Branch, in addition to the five (5) Regional Chapter leaders.\r\n\r\nIf 'no,' I recommend adjusting the overall limit to the greater of five (5), or three (3) plus one (1) per Branch (e.g., for OWASP New Zealand, the initial limit would be six)\"", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/59", @@ -13032,6 +13469,7 @@ "node_id": "I_kwDODQOii85DhPVD", "title": "Reference Meeting Platform Consistently", "body": "From: Lisa Jones @latf6711 \r\n\r\nWe need to be consistent when referring to Meetup in the Policy. Under shared services, we call it a chapter scheduling service for meetings and local events. Under meeting and activity requirements, it is called the OWASP meeting platform.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/61", @@ -13061,6 +13499,7 @@ "node_id": "I_kwDODQOii85DhQLn", "title": "Remove Soft Requirement for Listed Events", "body": "From: Lisa Jones @latf6711 \r\n\r\nIs this a requirement? If not, I would suggest it be removed.\r\n\r\nNB: Per the Policy above, if you misconfigure or do not use the above automation, you will need to do this step manually. Once your event is complete, please add it to the past events tab. For a more detailed example, see [https://owasp.org/www-projectchapter-example/]", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/62", @@ -13090,6 +13529,7 @@ "node_id": "I_kwDODQOii85DhRH5", "title": "Audit Operations Should Remain With Staff", "body": "From: Lisa Jones @latf6711 \r\n\r\nThe chapter committee is going to complete operational audit tasks on Regional Chapters? How is this going to be accomplished?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/63", @@ -13119,6 +13559,7 @@ "node_id": "I_kwDODQOii85DhRvn", "title": "Nonresponsive Leaders", "body": "From: Lisa Jones @latf6711 \r\n\r\nThere needs to be a recourse for leaders not responding. We give them an email address, increased response time to nine days, and the email can be forwarded to any other platform. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/64", @@ -13148,6 +13589,7 @@ "node_id": "I_kwDODQOii85DhSYy", "title": "Soft Requirement for Monitoring", "body": "From: Lisa Jones @latf6711 \r\n\r\nThis statement does not make sense to me? 
\r\n**Leaders should** monitor and participate in the OWASP Leaders List, other Google groups, and the Slack platform.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/65", @@ -13177,6 +13619,7 @@ "node_id": "I_kwDODQOii85DhYZk", "title": "Use of OWASP Trademarks on Chapter Social Media", "body": "From: Lisa Jones @latf6711 \r\n\r\nChapters are currently allowed to use the OWASP name and logo in various places such as social media accounts as they wish with no guidelines or consequences. This needs to be reviewed. OWASP® is now a registered trademark of the Foundation. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/66", @@ -13205,6 +13648,7 @@ "node_id": "I_kwDODQOii85Dhabp", "title": "Number of Accounts", "body": "From: Lisa Jones @latf6711 \r\n\r\nShared Services\r\nHow many accounts are city/student/regional/branch chapters entitled to have?\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/67", @@ -13235,6 +13679,7 @@ "node_id": "I_kwDODQOii85Dhaqp", "title": "Restrict Advertising Events/Activities not from OWASP", "body": "From: Lisa Jones @latf6711 \r\n\r\nThe use of other organizations' tools and platforms is displayed on the chapter page connected to a URL that needs to be restricted. The promoting of OTHER organizations' events and activities on the OWASP website and OWASP provided tools needs to be prohibited.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/68", @@ -13263,6 +13708,7 @@ "node_id": "I_kwDODQOii85DhdQT", "title": "City Chapters are not restricted to the City Named", "body": "From: Lisa Jones @latf6711 \r\n\r\n`City chapters are defined for a single city only.` This is an incorrect statement. City chapters were named after the primary meeting place of the chapter. City chapters were never defined strictly to that city.\r\n\r\nDefinition of defined: having a definite outline or specification; precisely marked or stated. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/70", @@ -13292,6 +13738,7 @@ "node_id": "I_kwDODQOii85Dhdz9", "title": "Chapter Leader Residency Requirements", "body": "From: Lisa Jones @latf6711 \r\n\r\n`City chapter leaders should reside in the same country as their city chapter.` \r\n\r\nA leader could possibly need to take a plane to get to the city of the chapter in some countries. The above statement was added to the chapter policy. This defeats the building of community in that city and could eliminate the physical participation and assist in holding the meeting. This will lead to leaders in name only to fulfill the 2 leader minimum. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/71", @@ -13320,6 +13767,7 @@ "node_id": "I_kwDODQOii85Dhekz", "title": "Student Chapters Requirements not Operationally Feasible", "body": "From: Lisa Jones @latf6711 \r\n\r\n`Student chapters do not need to meet minimum student leadership requirements during the semester or year breaks. Still, they must come back into compliance within 30 days of starting a new semester or year.`\r\n\r\nWho will keep track of the different schools semester dates and calendar breaks? The person requesting the chapter should still need to create the page within 30 days of approval. 
Access to the OWASP GitHub page is not governed by each school's breaks. There are only 3 meeting activities required over a 12 month period. I am not sure why the statement was added. It is not necessary and creates more confusion. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/72", @@ -13349,6 +13797,7 @@ "node_id": "I_kwDODQOii85Dhl8J", "title": "Leaders Without Benefits not Operationally Feasible", "body": "From: Lisa Jones @latf6711 \r\n\r\n```\r\nRegional Chapter Leadership\r\nRegional Leadership management is the same as per City chapters;\r\nRegional chapters must have at least three and usually a maximum of five OWASP Foundation-recognized leaders. If the region has more than five associated locations, it can have up to seven (7) leaders.\r\n```\r\n\r\nThe two statements above contradict each other and should be eliminated. If regional chapters are allowed to have 7 leaders but only 5 are eligible for leader benefits. This can not be managed as is. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/73", @@ -13378,6 +13827,7 @@ "node_id": "I_kwDODQOii85DhmyP", "title": "Force Requirement to Accept Github Invitation For All", "body": "From: Lisa Jones @latf6711 \r\n\r\n`At least one leader, but preferably all leaders, must accept the GitHub invite before it expires. A leader with chapter repository admin rights can grant other leaders admin access.\r\n\r\nThis statement is creating a single point of failure. This is why we ask for a minimum of 2 leaders. This contradicts the requirements in the request ticket and the statement in Share Services. \r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/74", @@ -13407,6 +13857,7 @@ "node_id": "I_kwDODQOii85DhorL", "title": "How to Validate Starting a New City Chapter Requirements", "body": "From: Lisa Jones @latf6711 \r\n\r\nRegarding regional chapters...\r\n\r\n`All “Starting a New City chapter” requirements must be met`\r\n\r\nHow will the this be validated?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/75", @@ -13436,6 +13887,7 @@ "node_id": "I_kwDODQOii85DhpWy", "title": "Operational Overhead and Clarification - City/Regional Chapter Agreement ", "body": "From: Lisa Jones @latf6711 \r\n\r\n`All City chapter leaders within the defined region must agree to the creation of the new Regional Chapter, and there must be no dissenting City chapter leaders within the region. `\r\n\r\nThe agreement needs to be documented and kept on file in case of future disagreements. No verbal agreement will be accepted. \r\n\r\nFrom Me: And does that mean any new city chapters in the region get to have a say after the regional is formed?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/76", @@ -13465,6 +13917,7 @@ "node_id": "I_kwDODQOii85DhqAo", "title": "Regional Approval Process", "body": "From: Lisa Jones @latf6711 \r\n\r\n`The OWASP Foundation will only create regional Chapters with the approval of the Chapter Committee after confirming regional policy requirements are met. `\r\n\r\nWhat is the process for this to be completed? How will the chapter committee confirm that the requirements have been met? 
How often will they review and respond?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/77", @@ -13494,6 +13947,7 @@ "node_id": "I_kwDODQOii85DhrzZ", "title": "Operational Functionality Should not be Handled By Committee", "body": "From: Lisa Jones @latf6711 \r\n\r\nWho will oversee that the Chapter Committee is fulfilling and completing their responsibilities that are outlined in the policy?\r\nIf they are not completed in a reasonable timeframe or there is an issue who should we forward the inquiries to?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/79", @@ -13523,6 +13977,7 @@ "node_id": "I_kwDODQOii85Dhwa9", "title": "Emphasize Enabling the Community", "body": "From: Dirk Wetter @drwetter\r\n\r\nThe additional sentence emphasizes enabling the community. \r\n\r\n**Current:** `Chapters are central to OWASP’s mission of building a community of appsec professionals and developers around the world. `\r\n\r\n**Suggested**: `As OWASP's mission is bringing software security to the people, it is essential that the OWASP foundation is enabling the community and supporting them wherever possible. Chapters are central to OWASP’s mission of building a community of appsec professionals and developers around the world.\r\n\r\n`", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/80", @@ -13552,6 +14007,7 @@ "node_id": "I_kwDODQOii85DhzHR", "title": "Committees Operational Monitoring and Communication Clarification", "body": "From: Dirk Wetter @drwetter \r\n\r\n**Current:** `The Chapter Committee will additionally monitor this yearly.`\r\n\r\n**Suggested:** `The Chapter Committee will additionally monitor this yearly and discuss deficiencies with the Regional Chapter if necessary.`\r\n\r\nThe reasoning is to establish communication before taking action.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/83", @@ -13577,21 +14033,22 @@ "pk": 473, "fields": { "nest_created_at": "2024-09-11T20:18:02.645Z", - "nest_updated_at": "2024-09-11T20:18:02.645Z", + "nest_updated_at": "2024-09-13T15:21:31.443Z", "node_id": "I_kwDODQOii85Or3Xy", "title": "Bylaws reconcilliation - 15% everywhere", "body": "Let's make all splits/admin overhead 15% throughout all policies.\r\n", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/104", "number": 104, "sequence_id": 1320121842, "is_locked": false, "lock_reason": "", "comments_count": 1, - "closed_at": null, + "closed_at": "2024-09-13T12:28:52Z", "created_at": "2022-07-27T21:06:14Z", - "updated_at": "2022-07-28T12:53:26Z", + "updated_at": "2024-09-13T12:28:52Z", "author": 198, "repository": 597, "assignees": [ @@ -13607,21 +14064,22 @@ "pk": 474, "fields": { "nest_created_at": "2024-09-11T20:18:04.368Z", - "nest_updated_at": "2024-09-11T20:18:04.368Z", + "nest_updated_at": "2024-09-13T15:21:32.824Z", "node_id": "I_kwDODQOii85Or3vH", "title": "Bylaw Reconcilliation - Update Membership Policy for new bylaws", "body": "- Individual members - all forms of \r\n- Complimentary members - no vote\r\n- Leaders - must be a financial member\r\n- Leadership requirements", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/105", "number": 
105, "sequence_id": 1320123335, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:52Z", "created_at": "2022-07-27T21:08:05Z", - "updated_at": "2022-07-27T21:08:05Z", + "updated_at": "2024-09-13T12:28:52Z", "author": 198, "repository": 597, "assignees": [ @@ -13637,21 +14095,22 @@ "pk": 475, "fields": { "nest_created_at": "2024-09-11T20:18:05.983Z", - "nest_updated_at": "2024-09-11T20:18:05.983Z", + "nest_updated_at": "2024-09-13T15:21:34.189Z", "node_id": "I_kwDODQOii85Ouk7Z", "title": "Bylaw Policy Reconcilliation - spell check", "body": "", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/106", "number": 106, "sequence_id": 1320832729, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:52Z", "created_at": "2022-07-28T12:05:39Z", - "updated_at": "2022-07-28T12:05:40Z", + "updated_at": "2024-09-13T12:28:52Z", "author": 198, "repository": 597, "assignees": [ @@ -13667,21 +14126,22 @@ "pk": 476, "fields": { "nest_created_at": "2024-09-11T20:18:07.717Z", - "nest_updated_at": "2024-09-11T20:18:07.717Z", + "nest_updated_at": "2024-09-13T15:21:35.518Z", "node_id": "I_kwDODQOii85OulA3", "title": "Bylaw Policy Reconcilliation - Markdown Lint", "body": "", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/107", "number": 107, "sequence_id": 1320833079, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:53Z", "created_at": "2022-07-28T12:05:57Z", - "updated_at": "2022-07-28T12:05:57Z", + "updated_at": "2024-09-13T12:28:53Z", "author": 198, "repository": 597, "assignees": [ @@ -13697,21 +14157,22 @@ "pk": 477, "fields": { "nest_created_at": "2024-09-11T20:18:09.456Z", - "nest_updated_at": "2024-09-11T20:18:09.456Z", + "nest_updated_at": "2024-09-13T15:21:36.975Z", "node_id": "I_kwDODQOii85OulI7", "title": "Bylaw Policy Reconcilliation - Change address to Edgewater Place", "body": "", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/108", "number": 108, "sequence_id": 1320833595, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:53Z", "created_at": "2022-07-28T12:06:24Z", - "updated_at": "2022-07-28T12:06:24Z", + "updated_at": "2024-09-13T12:28:53Z", "author": 198, "repository": 597, "assignees": [ @@ -13727,21 +14188,22 @@ "pk": 478, "fields": { "nest_created_at": "2024-09-11T20:18:11.145Z", - "nest_updated_at": "2024-09-11T20:18:11.145Z", + "nest_updated_at": "2024-09-13T15:21:38.402Z", "node_id": "I_kwDODQOii85OunTE", "title": "Bylaws policy reconcilliation - remove references to the wiki", "body": "Search for wiki & change to website\r\nSearch for index.php and find new URL", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/109", "number": 109, "sequence_id": 1320842436, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:53Z", "created_at": "2022-07-28T12:14:05Z", - "updated_at": "2022-07-28T12:14:05Z", + "updated_at": "2024-09-13T12:28:53Z", 
"author": 198, "repository": 597, "assignees": [ @@ -13757,21 +14219,22 @@ "pk": 479, "fields": { "nest_created_at": "2024-09-11T20:18:12.823Z", - "nest_updated_at": "2024-09-11T20:18:12.823Z", + "nest_updated_at": "2024-09-13T15:21:39.745Z", "node_id": "I_kwDODQOii85Ounle", "title": "Bylaws policy reconcilliation - remove outdated chapter handbook / advice", "body": "Point to the new Chapter Handbook", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/110", "number": 110, "sequence_id": 1320843614, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:54Z", "created_at": "2022-07-28T12:15:11Z", - "updated_at": "2022-07-28T12:15:11Z", + "updated_at": "2024-09-13T12:28:54Z", "author": 198, "repository": 597, "assignees": [ @@ -13787,21 +14250,22 @@ "pk": 480, "fields": { "nest_created_at": "2024-09-11T20:18:14.479Z", - "nest_updated_at": "2024-09-11T20:18:14.479Z", + "nest_updated_at": "2024-09-13T15:21:41.086Z", "node_id": "I_kwDODQOii85OvQlZ", "title": "Bylaw policy reconcilliation - election policy update", "body": "Add detail about non-voting members\r\nAdd detail around bylaw amendment, CoI and replacement", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/112", "number": 112, "sequence_id": 1321011545, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:54Z", "created_at": "2022-07-28T14:22:10Z", - "updated_at": "2022-07-28T14:22:11Z", + "updated_at": "2024-09-13T12:28:54Z", "author": 198, "repository": 597, "assignees": [ @@ -13817,21 +14281,22 @@ "pk": 481, "fields": { "nest_created_at": "2024-09-11T20:18:16.126Z", - "nest_updated_at": "2024-09-11T20:18:16.126Z", + "nest_updated_at": "2024-09-13T15:21:42.450Z", "node_id": "I_kwDODQOii85OvRRI", "title": "Update COVID policy - July 2022", "body": "Change to endemic coverage", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/113", "number": 113, "sequence_id": 1321014344, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:54Z", "created_at": "2022-07-28T14:24:10Z", - "updated_at": "2022-07-28T14:24:10Z", + "updated_at": "2024-09-13T12:28:54Z", "author": 198, "repository": 597, "assignees": [ @@ -13847,21 +14312,22 @@ "pk": 482, "fields": { "nest_created_at": "2024-09-11T20:18:17.767Z", - "nest_updated_at": "2024-09-11T20:18:17.767Z", + "nest_updated_at": "2024-09-13T15:21:43.822Z", "node_id": "I_kwDODQOii85OwThK", "title": "Update signing authority to include staff", "body": "Staff have a BAU requirement to approve expenses. We need a limit in the signing authority policy. 
\r\n", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/114", "number": 114, "sequence_id": 1321285706, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:55Z", "created_at": "2022-07-28T17:54:15Z", - "updated_at": "2022-07-28T17:54:15Z", + "updated_at": "2024-09-13T12:28:55Z", "author": 198, "repository": 597, "assignees": [ @@ -13877,21 +14343,22 @@ "pk": 483, "fields": { "nest_created_at": "2024-09-11T20:18:19.413Z", - "nest_updated_at": "2024-09-11T20:18:19.413Z", + "nest_updated_at": "2024-09-13T15:21:45.186Z", "node_id": "I_kwDODQOii85UugpF", "title": "Chapter Policy", "body": "I'm afraid I have to disagree with referencing a 1997 memo from IETF to write an operational business policy. Polices need to be enforceable, with consequences for noncompliance. It also needs to be simple and understandable, even for non-technical individuals. \r\nLast, it needs to be manageable as just a part of a single team member's job responsibilities.\r\n\r\n \r\nWords such as **SHALL\", \"SHALL NOT,\" \"SHOULD,\" \"SHOULD NOT,\" \"RECOMMENDED,\" \"MAY,\" and \"OPTIONAL\"** \r\nImmediately, are not policy and if removed will shorten the document. {The RFC 2119 - Keywords for use in [RFCs ](https://www.ietf.org/rfc/rfc2119.txt)to Indicate Requirement Levels in March 1997.}\r\n\r\n**Definitions:**\r\n- Polices are requirements that are enforceable with consequences.\r\n- Guidelines are recommendations that are not enforceable with NO consequences.\r\n- Procedures establish the defined practices or steps to accomplish the objective.\r\n\r\n### Characteristics of Business Policies\r\n**Effective policies have the following characteristics:**\r\n\r\n1. It must be clear, specific, and easily understood\r\n2. Specify company rules\r\n3. Explain their purpose\r\n4. State when policy should be applied\r\n5. Who it covers\r\n6. Method of enforcement\r\n7. Describe the consequences of non-compliance\r\n\r\n**Policy Writing Guidance**\r\n1. Keep it simple. Policies should be written in plain language – not legalese. ...\r\n2. Keep it general. Policies cannot contemplate all possible situations. ...\r\n3. Make it relevant. ...\r\n4. Check for accuracy and compliance. ...\r\n5. Ensure the policy can be enforced. ...\r\n6. Clearly state who does what. ...\r\n7. Less is more.\r\n\r\n**Benefits of Policies**\r\nWell-written policies give employees a way to handle problems and issues without constantly involving management whenever they need to make a decision. Policies define the limits of decision-making and outline the alternatives. Employees understand the constraints of their jobs. Reactivation can be justified on a case-by-case basis. 
\r\n\r\n\r\n", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/119", "number": 119, "sequence_id": 1421478469, "is_locked": false, "lock_reason": "", "comments_count": 1, - "closed_at": null, + "closed_at": "2024-09-13T12:28:55Z", "created_at": "2022-10-24T21:31:10Z", - "updated_at": "2022-10-25T19:48:22Z", + "updated_at": "2024-09-13T12:28:55Z", "author": 200, "repository": 597, "assignees": [], @@ -13903,21 +14370,22 @@ "pk": 484, "fields": { "nest_created_at": "2024-09-11T20:18:20.282Z", - "nest_updated_at": "2024-09-11T20:18:20.282Z", + "nest_updated_at": "2024-09-13T15:21:45.899Z", "node_id": "I_kwDODQOii85U0eCq", "title": "Leaders", "body": "- Leaders are not required to be paid OWASP members yet we give them a FREE OWASP account that we don't require them to use. \r\n- I want the statement added to the policy that all OWASP Foundation communications, requests, and expense reimbursement approvals. will be sent to the leaders' OWASP email addresses. \r\n- Allowing 60 days for a leader to respond is not reasonable. The IRS requires expenses to be submitted within 60 days. Members will wait at most a week (7 days) for a leader to respond to an email sent.\r\n\r\n\r\n", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/120", "number": 120, "sequence_id": 1423040682, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:55Z", "created_at": "2022-10-25T20:32:56Z", - "updated_at": "2022-10-25T20:32:56Z", + "updated_at": "2024-09-13T12:28:55Z", "author": 200, "repository": 597, "assignees": [], @@ -13929,21 +14397,22 @@ "pk": 485, "fields": { "nest_created_at": "2024-09-11T20:18:21.102Z", - "nest_updated_at": "2024-09-11T20:18:21.102Z", + "nest_updated_at": "2024-09-13T15:21:46.584Z", "node_id": "I_kwDODQOii85U5g2F", "title": "Expense Policy ", "body": "For the items listed above and expenses under USD $250, there are no pre-set limits beyond the “fair and reasonable” test. Expenses not listed and exceeding USD $250 require approval from at least two leaders (or a leader + relevant committee if there is only one leader), and [pre-approval](https://owasporg.atlassian.net/servicedesk/customer/portal/4/group/14) by the Executive Director or their designate. When in doubt, individuals requesting expense reimbursement should apply for pre-approval.\r\n\r\n[](https://owasporg.atlassian.net/servicedesk/customer/portal/4/group/14) > This link goes to Funding Grant Requests.\r\n\r\nAndrew had me create a JIRA ticket for [Pre-Approval of Chapter Expenses](https://owasporg.atlassian.net/servicedesk/customer/portal/8/group/20/create/107). If I need to remove the ticket and chapter leaders need to use the Funding Grant Request ticket, please let me know if it is causing confusion. 
", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/www-policy/issues/121", "number": 121, "sequence_id": 1424362885, "is_locked": false, "lock_reason": "", "comments_count": 0, - "closed_at": null, + "closed_at": "2024-09-13T12:28:56Z", "created_at": "2022-10-26T16:44:35Z", - "updated_at": "2022-10-26T16:44:35Z", + "updated_at": "2024-09-13T12:28:56Z", "author": 200, "repository": 597, "assignees": [], @@ -13959,6 +14428,7 @@ "node_id": "I_kwDODQOii85Vw3zI", "title": "Supporters and Bartering Arrangements", "body": "This section is very confusing to leaders. \r\nMaybe seperate the Bartering arrangements for the Contractual supporter agreements.\r\nThe (https://contact.owasp.org/) goes to General is there a specific topic the leader should submit to?\r\n\r\n\r\nSupporters and Bartering Arrangements\r\nChapter leaders are encouraged to obtain local chapter supporters via bartering arrangements (i.e., services, event spaces, or food and beverages paid for by a chapter supporter) and donations via the OWASP website. Chapters can define the levels and benefits of local Chapter Supporters, including logos on the introduction slides and the Chapter home page. Any contractual agreement, bartering arrangement, or financial transaction must be registered and processed by the OWASP Foundation through our [service desk](https://contact.owasp.org/)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/122", @@ -13985,6 +14455,7 @@ "node_id": "I_kwDODQOii85V4F0Q", "title": "Chapter Funding Pre-Approval URL needs to be updated", "body": "Shared Services\r\nOWASP Foundation will provide chapters with the following shared services at no cost. Chapters are encouraged to make use of these. These services can be accessed by submitting a ticket via [Contact Us](https://contact.owasp.org/). 
If, after 9 business days have passed, and reasonable efforts have been made to utilize shared services, no response is received, the chapter may, with due care, seek a reasonable alternative, filling out a [Chapter Funding Pre-Approval]**(https://owasporg.atlassian.net/servicedesk/customer/portal/8/group/20/create/107*) ticket.\r\n\r\nUpdate the Chapter Funding Pre-Approval URL to https://owasporg.atlassian.net/servicedesk/customer/portal/4\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/123", @@ -14011,6 +14482,7 @@ "node_id": "I_kwDODQOii85bcEhZ", "title": "دانشجو", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/126", @@ -14037,6 +14509,7 @@ "node_id": "I_kwDODQOii857DbIm", "title": "Events Policy \"Events in a box\" link dead", "body": "https://owasp.org/www-policy/operational/events\r\n\r\nAT the bottom the link to \"Events in a box\" is dead.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/136", @@ -14063,6 +14536,7 @@ "node_id": "I_kwDODQOii8584LR8", "title": "Google's feedback ", "body": "![Screenshot_20240122-190527.png](https://github.com/OWASP/www-policy/assets/152741572/a2731731-3203-403a-b2df-799334ccf117)\n\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/137", @@ -14085,10 +14559,11 @@ "pk": 491, "fields": { "nest_created_at": "2024-09-11T20:18:26.302Z", - "nest_updated_at": "2024-09-11T20:18:26.302Z", + "nest_updated_at": "2024-09-13T15:21:47.278Z", "node_id": "I_kwDODQOii85_Zp9K", "title": "Website shows ancient WIP draft Chapters policy", "body": "When browsing on the OWASP website (owasp.org) and you browse to chapter policies\r\n\r\nAbout->Policies\r\n\r\nThen select [Chapters Policy](https://owasp.org/www-policy/operational/chapters)\r\n\r\nYou will notice on the top right a \"next\" selection entitled \"Chapters Policy - Draft (WIP)\"\r\n\r\nWhen selecting this entry you will be sent to a page that is out of date and states:\r\n\r\n\"Members are invited to provide feedback on this draft policy until January 22, 1970\"\r\n\r\nThis page needs to be removed or at least updated.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-policy/issues/138", @@ -14111,10 +14586,11 @@ "pk": 492, "fields": { "nest_created_at": "2024-09-11T20:18:30.994Z", - "nest_updated_at": "2024-09-11T20:18:30.994Z", + "nest_updated_at": "2024-09-13T15:21:50.896Z", "node_id": "I_kwDODP8dT85BDtP1", "title": "Incorrect Mail To links", "body": "For the linked emails each email has // before the email address so the mail to address will not function correctly. 
\r\nExample: mailto://test@owasp.org instead of mailto:test@owasp.org\r\nThis is also present on the \"OWASP WIA, Diversity and Inclusion Committee\" page.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-compliance/issues/1", @@ -14137,10 +14613,11 @@ "pk": 493, "fields": { "nest_created_at": "2024-09-11T20:18:35.207Z", - "nest_updated_at": "2024-09-11T20:18:35.207Z", + "nest_updated_at": "2024-09-13T15:21:54.350Z", "node_id": "I_kwDODP8ZEc5AJp1h", "title": "Consider renaming default branch to \"main\"", "body": "Please consider renaming the default branch from \"master\" to \"main\"\r\n\r\nThe process, and detailed explanation/justification can be found at this link:\r\nhttps://github.com/github/renaming\r\n\r\nIf possible, could this be done on every other OWASP open source project? Some companies are getting stricter about inclusive language, and this can help them ensure that their own code that references OWASP projects stays in compliance with design standards.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-committee-wia/issues/6", @@ -14167,6 +14644,7 @@ "node_id": "I_kwDODOEwJ86Hksim", "title": "OWASP Mobile Top 10 refers to MASVS ", "body": "Hi,\r\nCan we do (and help me to) mapping OWASP Mobile Top 10 to MASVS ?\r\n\r\nFor example:\r\nM1 (2024) - Improper Credential Usage:\r\n - MASVS-STORAGE\r\n - MASVS-...\r\nM2 (2024) - Inadequate Supply Chain Security:\r\n - MASVS-...\r\n - MASVS-...\r\n...\r\n...\r\nM10 (2024) - Insufficient Cryptography:\r\n - MASVS-...\r\n - MASVS-...\r\n\r\nThanks.", + "summary": "The issue discusses the need for mapping the OWASP Mobile Top 10 vulnerabilities to the Mobile Application Security Verification Standard (MASVS). The request includes examples for specific vulnerabilities, like M1 (Improper Credential Usage) and M2 (Inadequate Supply Chain Security), and seeks to establish corresponding MASVS categories for each. To address this, a detailed mapping of all M1 to M10 vulnerabilities to their relevant MASVS counterparts needs to be created.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-mobile-top-10/issues/63", @@ -14189,10 +14667,11 @@ "pk": 495, "fields": { "nest_created_at": "2024-09-11T20:19:00.154Z", - "nest_updated_at": "2024-09-11T20:19:00.154Z", + "nest_updated_at": "2024-09-13T15:22:13.415Z", "node_id": "I_kwDODOEwJ86OePLH", "title": "How to mapping OWASP Mobile Top 10 to CWE", "body": "Dear Crew Staff\r\nI'm a researcher of mobile secuity. OWASP Mobile top 10 is the critical issue to solve and discuss so according to my research, I wonder if there is a mapping table that can help me to map owasp mobile top 10 and CWE or owasp top 10. I survey lots of document and nothing can solve my problem. Could anyone help me if the mapping table exist? Or the inner document can release privately. Thanks a lot.", + "summary": "The issue discusses the need for a mapping table that connects the OWASP Mobile Top 10 vulnerabilities to the Common Weakness Enumeration (CWE) or the OWASP Top 10. The researcher has not found any existing documentation that addresses this need and is seeking assistance in locating such a mapping or obtaining related documents privately. 
It may be helpful to explore existing resources or databases that specialize in vulnerability mappings or to consider creating a new mapping table if one does not exist.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-mobile-top-10/issues/64", @@ -14215,10 +14694,11 @@ "pk": 496, "fields": { "nest_created_at": "2024-09-11T20:19:11.900Z", - "nest_updated_at": "2024-09-11T20:19:11.900Z", + "nest_updated_at": "2024-09-13T15:22:22.506Z", "node_id": "I_kwDODMGC3c5qYHjA", "title": "run a daily workflow action to check all links ", "body": "It would be good to automate checking the links for the SecurityRAT OWASP project pages, in particular :\r\n* https://securityrat.org/\r\n* https://securityrat.github.io/", + "summary": "Implement a daily workflow action to automate link checking for SecurityRAT OWASP project pages. Focus on the following URLs:\n\n- https://securityrat.org/\n- https://securityrat.github.io/\n\n1. Utilize a CI/CD tool like GitHub Actions to schedule the daily workflow.\n2. Set up a link-checking tool, such as `html-proofer` or `linkchecker`, within the workflow.\n3. Create a script that runs the link checker against the specified URLs.\n4. Configure the workflow to trigger on a daily schedule using cron syntax.\n5. Ensure notifications are sent (e.g., via email or Slack) if broken links are detected.\n6. Test the workflow to confirm it runs correctly and handles errors gracefully.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-securityrat/issues/6", @@ -14245,6 +14725,7 @@ "node_id": "MDU6SXNzdWU1OTQzODU5Mzc=", "title": "v1.0 release versions seems not to be working", "body": "I downloaded the Raspberry Pi 2 image, used balenaEtcher to write it to the SD card. Then I had tried booting it in Raspberry Pi 4 , but all I got is just a black screen, I also tried to boot it in Raspberry Pi 2, but it just hangs with the rainbow screen. Both versions were tried (sysupgrade and the other one). Later on, I have tried using another software for writing to SD – Win32DiskImager. But that didn‘t helped. To make sure that this is not related to my machine, I used other computers to write to the card, but the effect is the same. \r\n\r\nSome additional information\r\n•\tOS that I use: Windows 10\r\n•\tSize of the SD cards I‘ve tried to write to: 64 GB and 16 GB\r\n\r\nI‘ve also tried VDI image with VirtualBox (version 6.1.4 r136177). It gets to the GRUB loader, but after this, it just crashes. From what I can see from logs, It looks like that it results in critical error when trying to boot the kernel. \r\n\r\nI’ve also downloaded VMware to check if this would work, but unfortunately this seems to be not working also. After the grub loader screen, it starts booting, but at some point (crng init done), it just hangs. I’ve waited for about 40 minutes, but nothing progressed.\r\n", + "summary": "There are issues with the v1.0 release versions not booting correctly on Raspberry Pi devices. The user attempted to write the Raspberry Pi 2 image to an SD card using multiple tools (balenaEtcher and Win32DiskImager) and tested it on both Raspberry Pi 4 and 2, but encountered a black screen and a hanging rainbow screen, respectively. Additionally, attempts to run the image in VirtualBox and VMware resulted in crashes or hanging after the GRUB loader. The user is using Windows 10 and has tried both 64 GB and 16 GB SD cards. 
\n\nTo address this issue, it may require investigating the integrity of the release images or exploring compatibility with the Raspberry Pi models.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoTGoat/issues/2", @@ -14271,6 +14752,7 @@ "node_id": "MDU6SXNzdWU2NzUxNjQ2NTU=", "title": "IoTGoat as a container", "body": "Run IoTGoat within a docker container https://github.com/openwrt/docker", + "summary": "The issue suggests running IoTGoat within a Docker container, referencing the OpenWrt Docker repository. To address this, the next steps involve creating a suitable Dockerfile or configuration that can encapsulate IoTGoat, ensuring its dependencies and environment are properly set up for containerization.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoTGoat/issues/4", @@ -14293,10 +14775,11 @@ "pk": 499, "fields": { "nest_created_at": "2024-09-11T20:19:18.133Z", - "nest_updated_at": "2024-09-12T00:15:15.904Z", + "nest_updated_at": "2024-09-13T17:00:02.383Z", "node_id": "I_kwDODKzRYM5uc4an", "title": "Not able to build from source", "body": "Hello. I am facing an error while building from the source. The error is regarding absence of gcc and g++ but in my kali linux virtual machine I have gcc and g++ v13.1.0. \r\n\"IoTGoat\"\r\n", + "summary": "The issue reported involves an error during the build process from source, specifically indicating that gcc and g++ are absent despite having version 13.1.0 installed on a Kali Linux virtual machine. To resolve this, it may be beneficial to check the system paths for gcc and g++, ensure that they are correctly set, or verify that the build environment is configured to recognize these compilers.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoTGoat/issues/9", @@ -14323,6 +14806,7 @@ "node_id": "I_kwDODKzRYM5zJY1p", "title": "port 5000 closed by default?", "body": "upnp port 5000 seems to be closed. tired restarting the machine (VM in oracle virtual box). do I need to activate upnp service myself?\r\n![issue](https://github.com/OWASP/IoTGoat/assets/138758418/71e61a2b-bd96-4c4e-a889-2950d8028fd1)\r\n", + "summary": "The issue reports that port 5000 appears to be closed by default, even after restarting the virtual machine in Oracle VirtualBox. The user is questioning whether they need to manually activate the UPnP service to resolve this. To address the problem, it may be necessary to check the configuration settings related to UPnP and ensure that the service is enabled and properly configured.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/IoTGoat/issues/10", @@ -14345,10 +14829,11 @@ "pk": 501, "fields": { "nest_created_at": "2024-09-11T20:21:59.383Z", - "nest_updated_at": "2024-09-11T20:21:59.383Z", + "nest_updated_at": "2024-09-13T15:24:28.713Z", "node_id": "I_kwDODGgRDc5nCnH-", "title": "Non-Responsive Website", "body": "## Issue\r\nThe website does not appear to be responsive and does not adapt to different screen sizes or resolutions. As a result, the content gets distorted, and the layout becomes unorganized, making it challenging to read and access the information.\r\n\r\n## Impact\r\nThe lack of responsiveness negatively affects user experience and hinders accessibility. 
With the increasing number of users accessing websites from mobile devices, it is crucial to ensure a responsive design to provide a seamless browsing experience.\r\n\r\nSteps to Reproduce:\r\n\r\n- Open the website on a Chrome or Firefox browser.\r\n- Resize the browser window to simulate different screen sizes or resolutions.\r\n- Observe the layout and content display.\r\n- Expected Behavior:\r\nThe website should adapt to different screen sizes, maintaining a consistent layout, readability, and usability across devices.\r\n\r\n## Actual Behavior\r\nThe website does not adjust its layout when viewed on smaller devices, resulting in content overlapping, text being cut off, and elements becoming difficult to interact with.\r\n\r\n## Attached Screenshots\r\n\"image\"\r\n\r\nPlease let me know if you require any further information to address this issue effectively. I believe that resolving this problem will greatly enhance the user experience of the OWASP Netherlands Chapter website.\r\n\r\nThank you for your attention to this matter.\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-netherlands/issues/1", @@ -14371,10 +14856,11 @@ "pk": 502, "fields": { "nest_created_at": "2024-09-11T20:22:27.862Z", - "nest_updated_at": "2024-09-11T20:22:27.862Z", + "nest_updated_at": "2024-09-13T15:24:50.928Z", "node_id": "MDU6SXNzdWU4MjAxNDY4ODQ=", "title": "Blank link and broken image", "body": "On the index page edgescan is a blank link and the EY image is broken - assuming this should be Mmk?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-dublin/issues/2", @@ -14397,10 +14883,11 @@ "pk": 503, "fields": { "nest_created_at": "2024-09-11T20:25:44.120Z", - "nest_updated_at": "2024-09-11T20:25:44.120Z", + "nest_updated_at": "2024-09-13T15:55:13.001Z", "node_id": "I_kwDODGgJzM5mQudb", "title": "Social Media Links no longer work", "body": "While trying to get ahold of the OWASP chapter in omaha, I found the following links no longer work:\r\n- https://www.facebook.com/blu3gl0w13\r\n- https://www.instagram.com/blu3gl0w13/", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-omaha/issues/27", @@ -14423,10 +14910,11 @@ "pk": 504, "fields": { "nest_created_at": "2024-09-11T20:30:51.744Z", - "nest_updated_at": "2024-09-11T20:30:51.744Z", + "nest_updated_at": "2024-09-13T15:59:21.954Z", "node_id": "MDU6SXNzdWU5MjA2Nzg1OTU=", "title": "Please re-activate using activation policy", "body": "Ricardo and Hector,\r\n\r\nThe Paraguay chapter has been suspended due to inactivity. The last meeting activity on the OWASP chapter page was March of 2019. The chapter has not submitted the ticket to request shared services like the Meetup Pro account and zoom to host virtual meetings. \r\n\r\nMany chapters failed to complete the chapter re-activation process and have been deactivated. This doesn't mean gone forever; it means you need to take some additional steps, very similar to what you needed to do before the deadlines. Under the Chapter Policy, you can contact the Chapter Committee and ask for new leadership or elections, or you can follow this re-activation process:\r\n\r\nThe complete re-activation process:\r\nFind at least 50% new leadership. There is a two leader minimum requirement for all new and re-activated chapters; ensure that the chapter requirements do not fall to one person. 
If life gets busy for one leader, members can still have chapter meetings in their area.Enter a ticket to re-activate your chapter, and use this ticket to add the new leaders to your chapter.https://owasporg.atlassian.net/servicedesk/customer/portal/7/group/18/create/73 ALL requested information for all the leaders is to be included in the ticket to process.Once the GitHub and Meetup are active, a minimum of two leaders has access to GitHub and Meetup. Preferably all leaders should have access to spread the load.Follow the migration instructions to customize your chapter home page:Simple to follow instructions https://owasp.org/migration/Simple to follow video https://youtu.be/tEm-YCeQno0 If you are getting build errors, install a local Jekyll instance following the instructions in the Migration Guide and work out what's wrong. If you still can't fix it, please log a non-funding request at https://contact.owasp.org straight away.Schedule a meeting or activity that the public can attend to occur within 90 days, and make sure it's on your chapter's home page. If you use Meetup Pro, add this to your index.md file to automatically be included:{% include chapter_events.html group=page.meetup-group %}This only works if the OWASP Foundation is the organizer of your Meetup, you are co-organizers and the metadata on the index.md includes your meetup groups name:\r\nmeetup-group: OWASP-Colorado-Springs-Meetup\r\nLastly, it would be best if you kept in contact with your members, speakers, the Chapter Committee, or the OWASP Foundation by monitoring and responding to your owasp.org email address. It's okay to forward this email address to another you use, but we strongly prefer leaders using the owasp.org email address for official chapter business. The policy requires you to respond within 7 to 30 days depending on the request, so please make sure you're regularly checking mails, particularly in the lead-up to your meetings.\r\n\r\nYou can find the Monthly Slides and many helpful resources, such as the speaker's bureau at the Chapter Committee's Resources page:\r\nhttps://owasp.org/www-committee-chapter/#div-resources_for_chapters\r\nYou can find the Chapter Policy, to which this process complies here: \r\nhttps://owasp.org/www-policy/operational/chapters\r\nThe Chapter policy requires that a meeting is held within 90 days of re-activation, or leadership will again be cleared and the chapter deactivated. \r\n\r\nWe look forward to seeing many more meetings from your chapter! \r\n\r\nThanks,\r\n\r\nAndrew van der Stock, Executive Director\r\nLisa Jones, Community Manager", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-asuncion/issues/4", @@ -14449,10 +14937,11 @@ "pk": 505, "fields": { "nest_created_at": "2024-09-11T20:31:05.853Z", - "nest_updated_at": "2024-09-11T20:31:05.853Z", + "nest_updated_at": "2024-09-13T15:59:33.409Z", "node_id": "I_kwDODGgAjc5fOqRT", "title": "Como fazer parte?", "body": "Olá! 
Já sou membro no OWASP, gostaria de fazer parte do chapter SP.\r\n\r\nObrigado.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-sao-paulo/issues/1", @@ -14475,10 +14964,11 @@ "pk": 506, "fields": { "nest_created_at": "2024-09-11T20:31:39.399Z", - "nest_updated_at": "2024-09-11T20:31:39.399Z", + "nest_updated_at": "2024-09-13T16:00:00.501Z", "node_id": "MDU6SXNzdWU1NjM3MjExODk=", "title": "error with _layouts/col-sidebar.htmlI ", "body": "Hi!\r\n\r\nIm in troubles...During the push I got an error message that I can't do anything with:\r\n\r\n> The page build failed for the `master` branch with the following error:\r\n> \r\n> A file was included in `/_layouts/col-sidebar.html` that is a symlink or does not exist in your `_includes` directory. For more information, see https://help.github.com/en/github/working-with-github-pages/troubleshooting-jekyll-build-errors-for-github-pages-sites#file-is-a-symlink.\r\n\r\nBUT... the file is not in my repo ??\r\n /_layouts/col-sidebar.htmlI don't find it in the repo. I\r\n\r\nI think this is an error yet informed by other capters (I saw it in germany chapter)\r\n\r\nTIA!!", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-chile/issues/2", @@ -14505,6 +14995,7 @@ "node_id": "I_kwDODGf8oM551hKp", "title": "Make flyer to invite to discord server", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-albuquerque/issues/3", @@ -14531,6 +15022,7 @@ "node_id": "I_kwDODGf8oM551hO0", "title": "Print more business cards", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-albuquerque/issues/4", @@ -14553,10 +15045,11 @@ "pk": 509, "fields": { "nest_created_at": "2024-09-11T20:33:06.843Z", - "nest_updated_at": "2024-09-11T20:33:06.843Z", + "nest_updated_at": "2024-09-13T16:01:09.074Z", "node_id": "I_kwDODGf8oM551hT3", "title": "Find contacts for other colleges in area", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-albuquerque/issues/5", @@ -14579,10 +15072,11 @@ "pk": 510, "fields": { "nest_created_at": "2024-09-11T20:33:45.402Z", - "nest_updated_at": "2024-09-11T20:33:45.403Z", + "nest_updated_at": "2024-09-13T16:01:39.087Z", "node_id": "MDU6SXNzdWU3MDQ0OTQyNjA=", "title": "Info.md file with wrong linkage", "body": "Please update your links in info.md to:\r\n1.) Remove Become a Member (it is at the top of the page)\r\n2.) Remove www and index.php references that are holdovers from the wiki (use the owasp.org and current links)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-cairo/issues/1", @@ -14605,10 +15099,11 @@ "pk": 511, "fields": { "nest_created_at": "2024-09-11T20:33:52.920Z", - "nest_updated_at": "2024-09-11T20:33:52.920Z", + "nest_updated_at": "2024-09-13T16:01:45.022Z", "node_id": "I_kwDODGf7Oc6Euih_", "title": "Social links broken", "body": "I am interested in a local meet up! I noticed that the social links are broken. 
Are there updated links?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-chapter-halifax/issues/1", @@ -14631,10 +15126,11 @@ "pk": 512, "fields": { "nest_created_at": "2024-09-11T20:36:38.848Z", - "nest_updated_at": "2024-09-11T20:36:38.848Z", + "nest_updated_at": "2024-09-13T16:03:56.523Z", "node_id": "I_kwDODGf2JM6BytHM", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context for users. It is important to create a concise and informative summary of the project's purpose, features, and usage guidelines to enhance understanding and engagement. Consider drafting a description that highlights the key aspects of the project.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-internet-of-things-top-10/issues/3", @@ -14657,10 +15153,11 @@ "pk": 513, "fields": { "nest_created_at": "2024-09-11T20:37:00.746Z", - "nest_updated_at": "2024-09-11T20:37:00.746Z", + "nest_updated_at": "2024-09-13T04:12:43.304Z", "node_id": "I_kwDODGf1dM5zL19X", "title": "Update utilities section for Dragon-GPT", "body": "It would be good to add [Dragon-GPT ](https://github.com/LuizBoina/dragon-gpt)into the documentation, and update the section on utilities", + "summary": "Update the utilities section in the documentation to include Dragon-GPT. Ensure that you provide a comprehensive overview of Dragon-GPT's features, installation instructions, and usage examples. \n\nFirst, clone the Dragon-GPT repository to your local machine. Review the current documentation structure and identify where the utilities section is located. Draft content that highlights the advantages of using Dragon-GPT, emphasizing key functionalities and potential use cases. \n\nConsider adding code snippets to demonstrate how to integrate Dragon-GPT into existing workflows. Finally, submit a pull request with your updates for review.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-threat-dragon/issues/66", @@ -14690,6 +15187,7 @@ "node_id": "MDU6SXNzdWU1NTI5ODU5NzY=", "title": "Add synonyms/related terms?", "body": "\"Password spraying\" - testing a list of credentials for a known account - is a subset of credential cracking but the term is getting increased global coverage ([ACSC](https://www.cyber.gov.au/threats/advisory-2019-130), [Mitre](https://attack.mitre.org/techniques/T1110/), [ArsTechnica](https://arstechnica.com/information-technology/2020/01/iranian-hackers-have-been-password-spraying-the-us-grid/)). [Google trends](https://trends.google.com/trends/explore?date=today%205-y&geo=US&q=%22password%20spraying%22,%22credential%20cracking%22) shows that password spraying captures far more searches than credential cracking.\r\n\r\n\"Password replay\", \"credential replay\", and \"password reuse attacks\" have all been used to refer to credential stuffing ([NCSC](https://www.ncsc.gov.uk/news/alert-vpn-vulnerabilities), [Krebs](https://krebsonsecurity.com/tag/credential-replay-attacks/), [1password](https://blog.1password.com/how-to-protect-yourself-from-password-reuse-attacks/)). Credential stuffing is the more popular term but the others keep popping up.\r\n\r\nThe differences, if there are any, across these terms is confusing. 
There isn't a central location that captures the relationships and readers are left googling terms that net poor results.\r\n\r\nIncluding terms that become common in popular usage will also help drive OWASP awareness.", + "summary": "The issue highlights the need for clarity surrounding the terminology related to credential attacks, specifically \"password spraying,\" \"password replay,\" \"credential replay,\" and \"credential stuffing.\" It points out that while certain terms gain popularity and usage, there is a lack of a centralized resource to explain their relationships and differences. The suggestion is to add synonyms and related terms to improve understanding and awareness within the OWASP community. Action may involve compiling a comprehensive list of these terms and their definitions for better accessibility.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-automated-threats-to-web-applications/issues/1", @@ -14712,10 +15210,11 @@ "pk": 515, "fields": { "nest_created_at": "2024-09-11T20:37:21.085Z", - "nest_updated_at": "2024-09-11T20:37:21.085Z", + "nest_updated_at": "2024-09-13T16:04:29.614Z", "node_id": "I_kwDODGf08s56hh6D", "title": "Will there be any update plan?", "body": "The last update was 4 years ago, will there be any update plan?", + "summary": "The issue raises a concern about the lack of updates for the project, highlighting that the last update was four years ago. It inquires about the possibility of a future update plan. To address this, it may be beneficial to provide a roadmap or outline any upcoming developments to reassure users.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-automated-threats-to-web-applications/issues/3", @@ -14738,10 +15237,11 @@ "pk": 516, "fields": { "nest_created_at": "2024-09-11T20:37:28.792Z", - "nest_updated_at": "2024-09-11T20:37:28.792Z", + "nest_updated_at": "2024-09-13T16:04:35.675Z", "node_id": "MDU6SXNzdWU4NTkxMzI5NDY=", "title": "Can we update the benchmark scorecards?", "body": "I was looking at the _\"tool support / results\"_ tab:\r\n\r\n![image](https://user-images.githubusercontent.com/47480384/114915996-e9c6e900-9de9-11eb-8c48-4c1a0d444514.png)\r\n\r\nAnd found that we have very nice results in this link:\r\nhttps://rawgit.com/OWASP/Benchmark/master/scorecard/OWASP_Benchmark_Home.html\r\n\r\nHowever:\r\n- [ ] It was generated in 2016, can we run results today and update the image?\r\n- [ ] We ([Fluid Attacks](https://fluidattacks.com)) would like to include our security vulnerability detection tool in which we've recently evaluated results against the benchmark:\r\n - https://fluidattacks.com/blog/owasp-benchmark-fluid-attacks/ \r\n - https://docs.fluidattacks.com/machine/scanner/reproducibility (this link is experimental)\r\n \r\n Could you please give us some orientation on how to appear in the results? here: https://owasp.org/www-project-benchmark/ \r\n- [ ] rawgit.com \"RawGit has reached the end of its useful life\", can we host it in other place?\r\n\r\nI volunteer myself for any task needed, just let me know how could we push this forward\r\n\r\nThanks!", + "summary": "Update the benchmark scorecards to reflect current results. Follow these steps:\n\n1. **Run Updated Benchmarks**: Re-evaluate the benchmark results as the existing data is from 2016. Ensure to use the latest tools and methodologies available.\n\n2. **Include Fluid Attacks Tool**: Integrate the security vulnerability detection tool from Fluid Attacks into the benchmark. 
Review their evaluation results documented in:\n - https://fluidattacks.com/blog/owasp-benchmark-fluid-attacks/\n - https://docs.fluidattacks.com/machine/scanner/reproducibility (note: this link is experimental)\n\n3. **Host Scorecard Elsewhere**: Since RawGit is no longer viable, find an alternative hosting solution for the scorecard, such as GitHub Pages or another static site hosting service.\n\n4. **Coordinate Contributions**: Reach out to contributors for assistance and collaboration on these tasks. Utilize the volunteers, including the one who offered help, to manage the update process efficiently.\n\n5. **Document the Process**: Ensure to keep thorough documentation of changes made and the rationale behind them to maintain transparency.\n\nProceed with these steps to enhance the benchmark scorecards and integrate new tools effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-benchmark/issues/7", @@ -14764,10 +15264,11 @@ "pk": 517, "fields": { "nest_created_at": "2024-09-11T20:37:32.842Z", - "nest_updated_at": "2024-09-11T20:37:32.842Z", + "nest_updated_at": "2024-09-13T16:04:39.575Z", "node_id": "I_kwDODGf0rc5C0F52", "title": "Sort list entries by GH stars and/or age (instead of alphabetically)", "body": "**As an** author of a popular and up-to-date vulnerable application **I want** that app to show up at the top of the relevant VWAD lists **so that** the lists in general represent more of a ranking than an index.\r\n\r\nWith the current implementation of grabbing the GitHub star badges only (i.e. not having the actual number of stars available during rendering) this is probably not possible. The actual number of stars needs to be retrieved in some way. This could for example be a nightly job creating a simple JSON file in the repo that could then be processed during rendering.", + "summary": "The issue highlights the need to sort list entries by GitHub stars or age instead of the current alphabetical order. The goal is to ensure that popular and up-to-date applications appear at the top of the VWAD lists, creating a ranking system rather than a simple index. The current setup only retrieves GitHub star badges, lacking the actual star count during rendering. A proposed solution involves implementing a nightly job to collect and store the star counts in a JSON file, which can then be used during the rendering process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-vulnerable-web-applications-directory/issues/72", @@ -14793,10 +15294,11 @@ "pk": 518, "fields": { "nest_created_at": "2024-09-11T20:37:54.397Z", - "nest_updated_at": "2024-09-11T20:37:54.397Z", + "nest_updated_at": "2024-09-13T16:04:56.268Z", "node_id": "MDU6SXNzdWU1NTE4OTkzNjQ=", "title": "Wiki Deck pages", "body": "The previous wiki deck have not been recreated on the new GitHub site. The archived media wiki pages are at:\r\n\r\nhttps://wiki.owasp.org/index.php/Cornucopia_-_Ecommerce_Website_Edition_-_Wiki_Deck\r\n\r\nWondering if there is a way to script the autogeneration of multiple language versions of card pages from say an xml data file. Can anyone suggest ideas\"", + "summary": "The issue highlights that the previous wiki deck has not been recreated on the new GitHub site, and provides a link to the archived media wiki pages. It suggests exploring the possibility of scripting the autogeneration of multiple language versions of card pages from an XML data file. Suggestions for implementation ideas are requested. 
It may be beneficial to research automation tools or libraries that can facilitate this process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-cornucopia/issues/1", @@ -14823,10 +15325,11 @@ "pk": 519, "fields": { "nest_created_at": "2024-09-11T20:38:02.504Z", - "nest_updated_at": "2024-09-11T20:38:02.504Z", + "nest_updated_at": "2024-09-13T16:05:02.796Z", "node_id": "I_kwDODGfz8s6BytEW", "title": "Please add a description to this project", "body": "", + "summary": "The issue requests the addition of a project description to provide clarity and context. To address this, a concise and informative description should be drafted and incorporated into the project documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-hacking-lab/issues/2", @@ -14849,10 +15352,11 @@ "pk": 520, "fields": { "nest_created_at": "2024-09-11T20:38:44.786Z", - "nest_updated_at": "2024-09-11T20:38:44.786Z", + "nest_updated_at": "2024-09-13T16:05:35.916Z", "node_id": "I_kwDODGfyyM5CvJfI", "title": "Repetition of Security Checks in Projects ASVS L1 - Authentication Verification Requirements", "body": "I was navigating through the application and found that there is a repetition of each check in Projects -> ASVS L1 -> Authentication Verification Requirements\r\n\r\nNot sure if this is by design or an error. Though both entries consist of same details! \r\n\r\nhttps://demo.securityknowledgeframework.org/projects/summary/\r\n\r\n\"Screenshot\r\n\r\nThank You! \r\n", + "summary": "Investigate the issue of repeated security checks in Projects -> ASVS L1 -> Authentication Verification Requirements. Confirm whether this duplication is intentional or a result of an error. \n\n1. Review the code responsible for generating the ASVS L1 section to identify where checks are being added.\n2. Compare the entries to ensure they contain identical details and note any discrepancies.\n3. Check the database or data source to see if the duplication exists at the data level.\n4. If confirmed as an error, implement a fix by removing the duplicates.\n5. Test the application to ensure the changes do not affect other functionalities.\n6. Document the findings and the resolution process for future reference. \n\nEnsure to communicate with the team regarding the findings and possible implications on user experience or security.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-security-knowledge-framework/issues/12", @@ -14879,6 +15383,7 @@ "node_id": "MDU6SXNzdWU5NzQ5MzM5MDU=", "title": "Stable IDs for (sub-)Headlines of the Proactive Controles", "body": "Hi \r\nI wonder if you could generate stable IDs for main headlines or even more detailed subtitles as far as you like, please. This could help anybody to get a stable link to the proactive controls (e.g. references from the OWASP Top 10).\r\nThe IDs should be strctured ID numbers (like in a table of contents). If possible these IDs should be usable as http-anchors to access them from other projects and documents. These IDs should stay stable within a version of the cheat sheets.\r\n\r\nPlease let me know if I can help you.\r\n\r\nThanks and Cheers\r\nTorsten", + "summary": "The issue discusses the need for stable IDs for main headlines and subtitles in the Proactive Controls documentation. The proposed IDs would facilitate stable linking and referencing, such as from the OWASP Top 10. 
The IDs should be structured like those in a table of contents and function as HTTP anchors for use in other projects and documents. It is suggested that these IDs remain stable within a version of the cheat sheets. Assistance is offered for this task. \n\nTo address this issue, consider implementing a structured ID system for the headlines and subtitles in the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-proactive-controls/issues/8", @@ -14905,6 +15410,7 @@ "node_id": "I_kwDODGfyac6FDdEt", "title": "Missing concept #2: Trust boundary ", "body": "The second concept I'll advocate is trust boundaries. They are also a tremendously important part of \"most important areas of concern that software developers must be aware of. \"\r\n\r\nSuggested core text: \"Know what code is trusted to make security decisions, and why that code is resistant to attack by an attacker with a debugger or a proxy.\"", + "summary": "The issue discusses the need to include the concept of \"trust boundaries\" as a critical area of concern for software developers. It emphasizes the importance of understanding which code is trusted to make security decisions and the reasons behind its resilience against potential attacks, such as those using a debugger or proxy. It suggests incorporating this concept into the relevant documentation for better awareness and security practices. To address this, the documentation should be updated to include a clear explanation of trust boundaries and their significance in software security.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-proactive-controls/issues/33", @@ -14929,10 +15435,11 @@ "pk": 523, "fields": { "nest_created_at": "2024-09-11T20:39:00.443Z", - "nest_updated_at": "2024-09-11T20:39:00.443Z", + "nest_updated_at": "2024-09-13T04:14:20.834Z", "node_id": "I_kwDODGfyac6FOTSq", "title": "Missing concept: supply chain security", "body": "Modern web development is most commonly done by composing an application from open-source dependencies. \r\n\r\nI suppose most of the supply chain mitigations are not proactive and that's why they're not here yet, but a proactive approach to supply-chain security is possible and emerging.\r\n\r\nSome aspects of Content-Security-Policy are relevant to this. Similarly, Node.js and Deno offer means to limit access to powerful APIs on the process level in case malicious code gets pulled into an app from dependencies.\r\n\r\nUsing tools that inject themselves early in the process and often don't rely on known vulnerability databases (eg. socket.dev) \r\n\r\nEven more proactively, runtime protections can be introduced - see LavaMoat\r\nIntroduction to the concepts:\r\nhttps://www.w3.org/2023/03/secure-the-web-forward/talks/hardened-supply-chain.html\r\n\r\nI'd be interested to contribute to this site if some guidance is provided.", + "summary": "The issue discusses the lack of attention to supply chain security in modern web development, where applications are often built using open-source dependencies. It highlights the need for a proactive approach to supply chain security, mentioning relevant tools and concepts such as Content-Security-Policy, Node.js, Deno, and runtime protections like LavaMoat. The author expresses interest in contributing to the project and seeks guidance on how to proceed. 
\n\nTo address this issue, it may be beneficial to outline specific steps or areas where contributions are needed related to supply chain security practices and tools.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-proactive-controls/issues/36", @@ -14963,6 +15470,7 @@ "node_id": "I_kwDODGfyac6U_Y6l", "title": "check v3 markdown files, move information to cheat sheet series and, remove them from the project", "body": "we still have the doc/pdfs from the 2018/`v3` series stored. The markdown files in `v3/en` are not used by anything.\r\n\r\nCheck if some of the content from the v3 version could be moved to the owasp cheat sheet series. If so, move it there and remove the markdown files from this project.", + "summary": "The issue suggests reviewing the markdown files from the v3 series stored in `v3/en`, as they are not currently in use. It proposes transferring relevant content to the OWASP cheat sheet series and subsequently deleting the markdown files from the project. The next steps would involve checking the content for relevance and making the necessary updates.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-proactive-controls/issues/59", @@ -14987,10 +15495,11 @@ "pk": 525, "fields": { "nest_created_at": "2024-09-11T20:39:09.782Z", - "nest_updated_at": "2024-09-11T20:39:09.782Z", + "nest_updated_at": "2024-09-13T16:05:52.597Z", "node_id": "I_kwDODGfyKs59hvQk", "title": "SAMM ToolBox point to incorrect link", "body": "In the file info.md, the \"SAMM Toolbox v2\" point to a broken Excel spread sheet : https://github.com/owaspsamm/core/releases/download/v2.0.3/SAMM_spreadsheet.xlsx\r\nThe correct ressource is located here : https://github.com/owaspsamm/core/releases/download/v2.0.8/SAMM_spreadsheet.xlsx\r\n", + "summary": "The issue reports that the link to the \"SAMM Toolbox v2\" in the info.md file directs to a broken Excel spreadsheet. The correct link to the spreadsheet is provided, which points to version 2.0.8 instead of the broken version 2.0.3. The necessary action involves updating the link in the info.md file to the correct resource.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-samm/issues/4", @@ -15017,6 +15526,7 @@ "node_id": "I_kwDODGfx7M47bKJz", "title": "Improve the test coverage", "body": "Unit and integration tests should be written to make sure that the functionalities still work as expected. It would limit the possible regression issues caused by new development/refactorings, especially since there are several possible configuration permutations.\r\n\r\nDetailed manual testing scenarios, executions flows would be very helpful as well because it could also act as a documentation/functional requirements.", + "summary": "Improve test coverage by writing comprehensive unit and integration tests to ensure that all functionalities perform as expected. Focus on limiting regression issues that may arise from new developments or refactorings, especially given the various configuration permutations.\n\nStart by identifying critical functionalities and creating a list of test cases that cover different scenarios. Implement automated tests for both unit and integration levels, prioritizing areas with the highest risk of regression. \n\nAdditionally, document detailed manual testing scenarios and execution flows to serve as both functional requirements and a reference for future testing efforts. \n\nBegin by:\n1. Conducting a code review to identify untested areas.\n2. 
Creating a test plan that outlines key functionalities and configurations to be tested.\n3. Writing unit tests for individual components.\n4. Developing integration tests that simulate real-world usage with different configurations.\n5. Compiling manual testing documentation for comprehensive coverage.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-csrfguard/issues/24", @@ -15032,9 +15542,9 @@ "repository": 958, "assignees": [], "labels": [ + 128, 126, - 127, - 128 + 127 ] } }, @@ -15047,6 +15557,7 @@ "node_id": "I_kwDODGfx7M47bK1c", "title": "Lots of copied code from the Grouper repository", "body": "The following classes: \r\n* `org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeBase` ([original source code](https://github.com/Internet2/grouper/blob/master/grouper-misc/grouperActivemq/dist/bin/edu/internet2/middleware/grouperActivemq/config/ConfigPropertiesCascadeBase.java))\r\n* `org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeCommonUtils`\r\n* `org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeUtils`\r\n\r\nwere copied from the [Grouper](https://github.com/Internet2/grouper) repository. \r\n\r\n\r\nIt seems that only a few changes has been made:\r\n* Logging: although the code is commented out, so it's not relevant (`org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeBase#iLogger`)\r\n* Skipping the Expression Language (EL) related processing in `org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeBase#propertiesHelper`: again this is only relevant if there are keys with \".elConfig\" suffix\r\n* The following lines of code: \r\n \r\n ```java\r\n //InputStream inputStream = configFile.getConfigFileType().inputStream(configFile.getConfigFileTypeConfig(), this);\r\n try {\r\n //get the string and store it first (to see if it changes later)\r\n String configFileContents = configFile.retrieveContents(this);\r\n configFile.setContents(configFileContents);\r\n result.properties.load(new StringReader(configFileContents));\r\n ```\r\n in `org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeBase#retrieveFromConfigFiles` which seem to do the same as the original code.\r\n\r\nThe question is, **are these modifications really needed**? If not, the original code could be used as a maven dependency: \r\n```xml\r\n\r\n edu.internet2.middleware.grouper\r\n grouper-activemq\r\n 2.5.29\r\n\r\n```\r\n\r\n**Side note**: the Grouper project is outdated/bulky/poorly written with a lot of duplicated code from the `org.apache.commons:commons-lang3` and other common libraries. It would be nice to replace with a better alternative\r\n", + "summary": "Identify and remove duplicated code from the OWASP CSRFGuard repository that originates from the Grouper repository. Specifically, focus on the following classes:\n\n- `org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeBase`\n- `org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeCommonUtils`\n- `org.owasp.csrfguard.config.overlay.ConfigPropertiesCascadeUtils`\n\nEvaluate the modifications made to these classes:\n\n1. Review the logging implementation in `ConfigPropertiesCascadeBase#iLogger` to determine if it's necessary.\n2. Assess the impact of skipping Expression Language (EL) processing in `ConfigPropertiesCascadeBase#propertiesHelper`.\n3. Analyze the modifications in `ConfigPropertiesCascadeBase#retrieveFromConfigFiles`, particularly the code that retrieves and loads configuration file contents.\n\nDetermine if the changes are essential. 
If they are not, consider using the original Grouper code as a Maven dependency:\n\n```xml\n\n edu.internet2.middleware.grouper\n grouper-activemq\n 2.5.29\n\n```\n\nAs a side note, explore the possibility of replacing the outdated and bulky Grouper codebase with more efficient alternatives, particularly avoiding duplicated code from libraries like `org.apache.commons:commons-lang3`. \n\n**First Steps:**\n- Clone the OWASP CSRFGuard repository and set up a development environment.\n- Create a comparison of the copied code against the original code in the Grouper repository using a diff tool.\n- Document the findings regarding the necessity of the modifications.\n- Propose a plan to refactor the code or to replace it with the original Grouper dependency if appropriate.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-csrfguard/issues/25", @@ -15075,6 +15586,7 @@ "node_id": "I_kwDODGfx7M48hgVP", "title": "Document the configurations options that enables overwriting values through the web.xml", "body": "Fix the TODO in: `org.owasp.csrfguard.config.properties.javascript.JavaScriptConfigParameters`", + "summary": "Document configuration options to enable overwriting values via `web.xml`. Focus on detailing how to utilize parameters effectively for various scenarios. \n\nAddress the TODO in `org.owasp.csrfguard.config.properties.javascript.JavaScriptConfigParameters`. Investigate the existing code and identify the intended functionality that needs implementation. \n\nFirst steps:\n1. Review the `web.xml` documentation to understand configuration structure.\n2. Explore the `JavaScriptConfigParameters` class to identify the specific TODO and the context around it.\n3. Develop a plan to implement the missing functionality, ensuring compatibility with existing configurations.\n4. Write unit tests to confirm that overwriting values through `web.xml` works as intended after implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-csrfguard/issues/29", @@ -15090,9 +15602,9 @@ "repository": 958, "assignees": [], "labels": [ - 127, 128, - 129 + 129, + 127 ] } }, @@ -15105,6 +15617,7 @@ "node_id": "I_kwDODGfx7M48hyEV", "title": "Add automation to minify and transpile the JS code to ES5", "body": "- [ ] Releases should work with minified JS code to improve performance.\r\n- [ ] Logging messages should be removed.\r\n- [ ] Transpile to ES5 to support older browsers", + "summary": "Implement automation for minifying and transpiling JavaScript code to ES5. \n\n1. Set up a build tool like Webpack, Gulp, or Grunt to handle the minification and transpilation processes.\n2. Configure the chosen tool to minify the JS code, ensuring it runs during the build process for releases. Use UglifyJS or Terser for efficient minification.\n3. Integrate Babel into the build process to transpile JavaScript code to ES5, supporting older browsers. Create a `.babelrc` file with the necessary presets, such as `@babel/preset-env`.\n4. Add a script to the package.json to automate the build process, running both the minification and transpilation commands sequentially.\n5. Remove all logging messages from the production build to enhance performance. 
Implement a setting to toggle logging during development versus production environments.\n\nBegin by installing the required dependencies and setting up the initial configuration for the build tool of choice.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-csrfguard/issues/31", @@ -15134,6 +15647,7 @@ "node_id": "I_kwDODGfx7M5h6mfG", "title": "Create integration tests", "body": "Currently the integration of the solution is manually validated through the test application.\r\n\r\nIt would be much nicer to automate the scenarios and make it part of the CI build.\r\n\r\nPart of: https://github.com/OWASP/www-project-csrfguard/issues/24", + "summary": "Implement integration tests to automate the validation of the solution currently done manually using the test application. Integrate these tests into the CI build process for efficiency and reliability.\n\n**Technical Details:**\n1. Identify key scenarios that require testing within the integration workflow.\n2. Utilize a testing framework compatible with the existing application stack (e.g., JUnit, TestNG).\n3. Write automated test cases that cover all identified scenarios, ensuring they mimic the manual validation process.\n4. Ensure tests can be executed within the CI pipeline using tools like Jenkins, GitHub Actions, or Travis CI.\n5. Verify that test results are logged and reported appropriately after each CI build.\n\n**Possible First Steps:**\n- Review existing manual test cases and document them in a structured format.\n- Set up the chosen testing framework within the project environment.\n- Begin writing automated tests for the most critical scenarios.\n- Configure the CI build to include the execution of these tests.\n- Run initial tests and refine based on feedback and results.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-csrfguard/issues/192", @@ -15165,6 +15679,7 @@ "node_id": "I_kwDODGfx7M6FarBk", "title": "Master csrf token incorrectly returned as page token", "body": "### Discussed in https://github.com/OWASP/www-project-csrfguard/discussions/257\r\n\r\n
\r\n\r\nOriginally posted by **musaka872** March 27, 2024\r\nHi,\r\nI'm trying to integrate csrfguard 4.3.0 in our project. \r\nI've configured it to use per-session tokens and not per-page tokens. But when I receive the token in the response header it is returned as page token in this form `{pageTokens:{\"/page/uri\":\"csrf-token\"}}` and then when I send this token in a subsequent request csrfguard compares {pageTokens:{\"/page/uri\":\"csrf-token\"}} to \"csrf-token\" and it fails.\r\nI debugged CsrfGuardFilter and in handleSession method we have:\r\n```\r\nprivate void handleSession(final HttpServletRequest httpServletRequest, final InterceptRedirectResponse interceptRedirectResponse, final FilterChain filterChain,\r\n final LogicalSession logicalSession, final CsrfGuard csrfGuard) throws IOException, ServletException {\r\n\r\n final String logicalSessionKey = logicalSession.getKey();\r\n\r\n if (new CsrfValidator().isValid(httpServletRequest, interceptRedirectResponse)) {\r\n filterChain.doFilter(httpServletRequest, interceptRedirectResponse);\r\n } else {\r\n logInvalidRequest(httpServletRequest);\r\n }\r\n\r\n final String requestURI = httpServletRequest.getRequestURI();\r\n final String generatedToken = csrfGuard.getTokenService().generateTokensIfAbsent(logicalSessionKey, httpServletRequest.getMethod(), requestURI);\r\n\r\n CsrfGuardUtils.addResponseTokenHeader(csrfGuard, httpServletRequest, interceptRedirectResponse, new TokenTO(Collections.singletonMap(requestURI, generatedToken)));\r\n }\r\n```\r\nIn generateTokenIfAbsent it checks whether the per-page or master token should be generated and generates the correct master token. But then as you can see when TokenTO is created the master token is passed as per-page token and it is send as such in the response header.\r\n\r\nIs this a bug or I'm missing something?\r\n\r\nI don't want to parse the response header to retrieve the \"csrf-token\" that csrfguard returns.\r\n\r\nBest regards,\r\nMartin
", + "summary": "Investigate the issue of CSRF token being incorrectly returned as a page token in csrfguard 4.3.0. Confirm that the configuration is set to use per-session tokens, and verify the response header format, which currently appears as `{pageTokens:{\"/page/uri\":\"csrf-token\"}}`. \n\nDebug the `CsrfGuardFilter` class, specifically the `handleSession` method. Observe that the method generates a token using `generateTokensIfAbsent`, which correctly identifies whether to create a master or page token. However, the `TokenTO` constructor incorrectly passes the master token as a per-page token, leading to a mismatch during validation.\n\nAddress the following steps to resolve the issue:\n\n1. Review the implementation of `generateTokensIfAbsent` to ensure it handles master tokens properly.\n2. Modify the `TokenTO` instantiation to correctly identify and pass the generated master token instead of treating it as a page token.\n3. Test the changes to confirm that the response header now reflects the correct token type and that subsequent requests validate successfully. \n\nIf necessary, consult additional documentation on csrfguard or seek input from the community discussion linked in the original issue for further clarification on intended behavior.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-csrfguard/issues/262", @@ -15180,8 +15695,8 @@ "repository": 958, "assignees": [], "labels": [ - 127, - 130 + 130, + 127 ] } }, @@ -15190,10 +15705,11 @@ "pk": 532, "fields": { "nest_created_at": "2024-09-11T20:39:29.358Z", - "nest_updated_at": "2024-09-11T20:39:29.358Z", + "nest_updated_at": "2024-09-13T16:05:58.377Z", "node_id": "I_kwDODGfx7M6V-OuV", "title": "The isValidUrl method in csrfguard.js uses an insecure string-matching technique", "body": "We had run a scan after upgrading csrfguard library to version 4.3.0 and found below vulnerability with severity 5.4 .\r\nIt also reported that there is no non-vulnerable version of this component.\r\n\r\n**Explanation**\r\nThe csrfguard package is vulnerable to Cross-Site Request Forgery (CSRF). The isValidUrl method in csrfguard.js uses an insecure string-matching technique. Consequently, an attacker could exploit this vulnerability to cause tokens to leak in links to external (attacker-controlled) domains.\r\n\r\n**Version Affected**\r\n[3.1.0,4.4.0]\r\n\r\n**CVSS Details**\r\nSonatype CVSS 3 : 5.4\r\nCVSS Vector : CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:L/I:L/A:N", + "summary": "Identify and address the vulnerability in the `isValidUrl` method in `csrfguard.js`. This method utilizes an insecure string-matching technique leading to a Cross-Site Request Forgery (CSRF) vulnerability. The vulnerability allows attackers to exploit the method, potentially leaking tokens via links to external, attacker-controlled domains.\n\n**Affected Versions**: 3.1.0 to 4.4.0.\n**Severity**: CVSS score of 5.4.\n\n**First Steps**:\n1. Review the implementation of the `isValidUrl` method for potential flaws in string matching.\n2. Replace the current string-matching logic with a more secure method, such as using regular expressions or a URL parsing library to validate URLs against a whitelist.\n3. Conduct thorough testing to ensure the new implementation effectively mitigates CSRF attacks without introducing new vulnerabilities.\n4. 
Run security scans post-implementation to confirm the vulnerability is resolved before deploying the updated library.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-csrfguard/issues/299", @@ -15216,10 +15732,11 @@ "pk": 533, "fields": { "nest_created_at": "2024-09-11T20:39:33.539Z", - "nest_updated_at": "2024-09-11T20:39:33.539Z", + "nest_updated_at": "2024-09-13T04:14:37.188Z", "node_id": "MDU6SXNzdWU1Nzc0OTI2NjQ=", "title": "Link to the Official CS Website", "body": "In order to maintain a live updated website, the official website will be the following: https://cheatsheetseries.owasp.org/\r\nCheat sheets will be added in here as ToCs in order to allow for people to link to them if need be.\r\n\r\nThis could be amended once an easy way is created to properly push the live changes from the main repository to the `www` repository of the cheatsheets.", + "summary": "The issue discusses the need to link to the official CS website at https://cheatsheetseries.owasp.org/ for maintaining a live updated resource. It suggests that cheat sheets should be added as Table of Contents (ToCs) to facilitate easy linking. Additionally, it mentions that this approach may be revised once a more efficient method for pushing live changes from the main repository to the `www` repository is established. To address this, exploring automation or a streamlined process for updates could be beneficial.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-cheat-sheets/issues/16", @@ -15246,6 +15763,7 @@ "node_id": "I_kwDODGfxq85wCTBV", "title": "Typo on repo description", "body": "Hi there, \r\n\r\nI found a typo on repo description. It's written as `Respository` while it should be `Repository`.\r\n\r\n![Screenshot 2023-09-04 140551](https://github.com/OWASP/www-project-webgoat/assets/409455/af112134-b9ec-4421-aaf3-685552bdb8f6)", + "summary": "Correct the typo in the repository description. Change `Respository` to `Repository`. \n\n**Technical Steps:**\n1. Navigate to the repository settings on GitHub.\n2. Locate the section for editing the repository description.\n3. Update the description text to fix the typo.\n4. Save the changes to update the repository description. \n\nVerify that the change is reflected in the repository overview after saving.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-webgoat/issues/8", @@ -15268,10 +15786,11 @@ "pk": 535, "fields": { "nest_created_at": "2024-09-11T20:39:38.591Z", - "nest_updated_at": "2024-09-11T20:39:38.591Z", + "nest_updated_at": "2024-09-13T16:06:05.179Z", "node_id": "I_kwDODGfxq854eFXk", "title": "There is no space on the left side.", "body": "\r\n![snip](https://github.com/OWASP/www-project-webgoat/assets/122084921/1d052e24-6eea-496c-a21a-68cf86f63729)\r\n\r\nOn the left side there should be some space or padding , it will look good.\r\n", + "summary": "Add padding or margin to the left side of the layout. Ensure that the design appears balanced and visually appealing by introducing appropriate spacing. \n\n1. Identify the CSS class or element responsible for the left side layout.\n2. Modify the CSS properties by adding a `padding-left` or `margin-left` value to create the desired space.\n3. Test the changes across various screen sizes to maintain responsiveness.\n4. Review the design in different browsers to ensure consistency. 
\n\nImplement these steps to enhance the user interface.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-webgoat/issues/9", @@ -15298,6 +15817,7 @@ "node_id": "MDU6SXNzdWU2ODg1OTAwNjY=", "title": "Broken links in reviewing-code-for-cross-site-request-forgery-issues.md", "body": "All of the links in www-project-code-review-guide/pages/reviewing-code-for-cross-site-request-forgery-issues.md are broken. I will submit a pull request to fix the ones that I can find/correct.", + "summary": "The issue reports that all links in the document \"reviewing-code-for-cross-site-request-forgery-issues.md\" are broken. A pull request is planned to address and correct the faulty links. To resolve this, identifying and fixing the broken links in the document is necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-code-review-guide/issues/1", @@ -15324,6 +15844,7 @@ "node_id": "I_kwDODGfxjM48fkhM", "title": "Investigate unknown/unclear sources for information presented in previous Guide 2017", "body": "In the previous guide (2017), which can be found here https://owasp.org/www-project-code-review-guide/assets/OWASP_Code_Review_Guide_v2.pdf, are several graphs and very interesting statements and facts that do not have proper attribution of source. \r\n\r\nFor example, in Figure 1, a survey result that is only very briefly mentioned is depict. The quality of this source, and its legitimacy needs to be investigated. Based on what the investigation finds, we either will keep, remove or update the information for the upcoming version of the guide.\r\n\r\n![image](https://user-images.githubusercontent.com/318561/135821836-cf7d2fd7-8167-48af-8633-e97ada6ac7e2.png)\r\n\r\n\r\nInformation and statements that are hard to judge given the information in the guide can be found throughout. Each one of those has to be investigated and evaluated for future-fit in the updated/new guide.\r\n\r\nEach piece of information that needs to be investigated and evaluated should probably become its own issue. This issue can serve as a \"parent\" issue. ", + "summary": "The issue highlights the need to investigate and verify the sources of information presented in the 2017 Code Review Guide, particularly regarding graphs and statements that lack proper attribution. The legitimacy of these sources must be examined to determine whether the information should be retained, removed, or updated in the upcoming version of the guide. It suggests that each specific piece of information requiring investigation should be documented as a separate issue, with this issue serving as a parent for those discussions. A thorough review of the guide's content is necessary for its future update.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-code-review-guide/issues/4", @@ -15350,6 +15871,7 @@ "node_id": "I_kwDODGfxjM49hm0C", "title": "Add readme file for potential contributors", "body": "Add a readme that explains \r\n\r\n- the purpose and goal of the guide\r\n- the current state it is in\r\n- how to contribute to the project.\r\n\r\nWe also have to make sure the readme is not part of the deployment process.", + "summary": "The issue requests the addition of a README file aimed at potential contributors. The README should outline the purpose and goals of the guide, describe the current state of the project, and provide instructions on how to contribute. 
Additionally, it is important to ensure that the README is excluded from the deployment process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-code-review-guide/issues/5", @@ -15372,10 +15894,11 @@ "pk": 539, "fields": { "nest_created_at": "2024-09-11T20:39:45.457Z", - "nest_updated_at": "2024-09-11T20:39:45.457Z", + "nest_updated_at": "2024-09-13T04:14:44.616Z", "node_id": "I_kwDODGfxjM49nWdQ", "title": "Create discussable outline from code review guide 2.0", "body": "Create a markdown document that covers the outline of the secure code review guide 2.0, and also has a discussion and decision section for each chapter/section/part. \r\n\r\nThis should guide our discussion on what to keep, adjust and remove from the 2.0 to the 3.0 version. ", + "summary": "The issue requests the creation of a markdown document that outlines the secure code review guide 2.0. It should include a discussion and decision section for each chapter, section, or part, facilitating a comprehensive review of what elements to retain, modify, or discard as the guide transitions from version 2.0 to version 3.0. The next step involves developing the markdown document based on these specifications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-code-review-guide/issues/7", @@ -15404,6 +15927,7 @@ "node_id": "MDU6SXNzdWU2MDcyMTE3NTU=", "title": "Mobile should be a high priority", "body": "The previous layout was much better in mobile.", + "summary": "The issue highlights that the current mobile layout is not satisfactory and suggests that the previous version was more effective. It indicates that improving the mobile layout should be a high priority. A review and potential redesign of the mobile interface may be necessary to enhance user experience.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-ten/issues/15", @@ -15432,6 +15956,7 @@ "node_id": "MDU6SXNzdWU2MjY4ODA4NzY=", "title": "Add back content for 2013 and 2010 Top 10 Risks", "body": "Apologies if this has already been discussed during your meetings, but we would find it super valuable to have pages for the older 2010 and 2013 Top 10 vulnerabilities. Our particular use case is that we link to the OWASP pages in our vulnerability reports, and we still find linking to _OWASP TOP 10 2013: Cross-site Request Forgery - CSRF_ useful for our readers. There is also an interest in preserving those pages from a historical perspective, as a lot of good content existed prior to the migration to Jekyll.\r\n\r\nWe totally recognize that OWASP isn't interested in maintaining these older pages (Especially with the 2020 version on the horizon) but I think that could easily be mitigated by making it clear that they've been superseded by the latest version on the pages themselves.\r\n\r\nWe also recognize that the PDF archives still exist (Although not easily found or accessible via the website at this time), but the PDFs are fairly large and heavyweight to serve as a quick reference.\r\n\r\nIf the team is not philosophically opposed to the 2013/2010 content, we might be able to spend the time in creating a PR to get it back on the site.", + "summary": "The issue highlights the need for restoring the content related to the 2010 and 2013 Top 10 vulnerabilities on the site. The requester emphasizes the value of these pages for linking in vulnerability reports and preserving historical content, despite the OWASP's focus on newer versions. 
They acknowledge the existence of PDF archives but note their inaccessibility and size as limitations for quick reference. The suggestion is made that if the team is amenable to this idea, they could potentially create a pull request to reinstate the content. It would be beneficial to clarify that the older versions have been superseded by newer ones if implemented.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-ten/issues/21", @@ -15462,6 +15987,7 @@ "node_id": "MDU6SXNzdWU3ODg3MDI4ODc=", "title": "Statistical data for OWASP 2021", "body": "https://lab.wallarm.com/owasp-top-10-2021-proposal-based-on-a-statistical-data/\r\n\r\nPlease consider reusing this data:\r\n#OWASP | Top-10 2021 | Vulners search query | Avg. CVSS | # of bulletins | Overall score\r\n-- | -- | -- | -- | -- | --\r\nA1 | Injections | injection OR traversal OR lfi OR “os command” OR SSTI OR RCE OR “remote code” | 4.83 | 34061 | 164514.63\r\nA2 | Broken Authentication | authentication | 4.08 | 13735 | 56038.8\r\nA3 | Cross-Site Scripting (XSS) | xss | 0.1 | 433353 | 43335.3\r\nA4 | Sensitive Data Exposure | sensitive AND data | 3.55 | 5990 | 21264.5\r\nA5 | Insecure Deserialization | XXE OR deserialize OR deserialization OR “external entities” | 5.33 | 2985 | 15910.05\r\nA6 | Broken Access Control | access control | 0.72 | 16967 | 12216.24\r\nA7 | Insufficient Logging & Monitoring | logging | 3.35 | 2309 | 7735.15\r\nA8 | Server Side Request Forgery (SSRF) | SSRF OR “server side request forgery” | 3.8 | 1139 | 4328.2\r\nA9 | Known Vulnerabilities | type:cve and (http OR web OR html) | 5.38 | 376 | 2022.88\r\nA10 | Security Misconfiguration | misconfiguration OR misconfigure OR misconfig | 2.27 | 480 | 1089.6\r\n\r\n", + "summary": "The issue discusses the potential reuse of statistical data related to the OWASP Top 10 vulnerabilities for 2021. A summary table is provided, which includes details such as average CVSS scores, the number of bulletins for each category, and overall scores for various vulnerabilities, including Injections, Broken Authentication, and Cross-Site Scripting. \n\nTo proceed, it is suggested to consider how this data can be integrated or leveraged in the current project or documentation to enhance understanding or improve security measures.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-ten/issues/31", @@ -15488,6 +16014,7 @@ "node_id": "MDU6SXNzdWU3OTU1NTM2ODg=", "title": "Updates", "body": "It is important to emphasize the updates to new methods, new API's, new versions or fetures. \r\n", + "summary": "The issue highlights the need to emphasize updates related to new methods, APIs, versions, or features within the project. 
To address this, it may be necessary to create a clear documentation or communication strategy that outlines these updates effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-ten/issues/32", @@ -15514,6 +16041,7 @@ "node_id": "MDU6SXNzdWU5MzY0ODUzNTg=", "title": "Minor erros and missing pages on \"A10:2017-Insufficient Logging & Monitoring\" page", "body": "As it uses the & character, the name of the file for the page is `A10_2017-Insufficient_Logging%26Monitoring.md`, but when you are ON the page, the `view on github` link seems to be broken (404).\r\n\r\nThe correct URL (Found on the project main page) is `https://owasp.org/www-project-top-ten/2017/A10_2017-Insufficient_Logging%2526Monitoring`.\r\n\r\nThe second issue is the `https://www-01.ibm.com/common/ssi/cgi-bin/ssialias?htmlfid=SEL03130WWEN&` link on `average of 191 days` on the same page. The link leads us to a \"Deleted or moved\" page. As the information about the amount itself is not wrong, it's from 2016, therefore, I believe it's not up to date with the Ponemon institute.\r\n\r\n(Sorry if I posted this in the worng place)", + "summary": "The issue reports minor errors and missing pages on the \"A10:2017-Insufficient Logging & Monitoring\" page. The file name for the page is incorrectly formatted, leading to a broken \"view on GitHub\" link (404 error). The correct URL is noted. Additionally, a link to an IBM resource regarding the \"average of 191 days\" is broken, as it leads to a deleted or moved page, and the information is outdated. To resolve these issues, the file name and links need to be corrected and updated.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-ten/issues/38", @@ -15540,6 +16068,7 @@ "node_id": "I_kwDODGfxcc4-Z73Z", "title": "Comment about A4 on the T10 hompage", "body": "Page: https://owasp.org/Top10/\r\n\r\nThis sentence under A4 does not make total sense to me, can you help me understand it?\r\n\r\n_An insecure design cannot be fixed by a perfect implementation as by definition, needed security controls were never created to defend against specific attacks._\r\n\r\nI think a little cleanup here would be prudent.\r\n\r\nRespectfully, Jim", + "summary": "The issue highlights a sentence under A4 on the OWASP Top 10 homepage that is unclear and potentially confusing. The commenter suggests that the phrasing could benefit from clarification to enhance understanding. A revision of the wording may be necessary to improve its clarity and effectiveness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-ten/issues/46", @@ -15566,6 +16095,7 @@ "node_id": "I_kwDODGfxcc5K3jv-", "title": "Portuguese language change produces missing image", "body": "From a JIRA ticket: \r\n\r\nIf you go to https://owasp.org/Top10/A00_2021_Introduction/ then change the language setting to Português (Brasil) then you can see the image is not showing. There is error 404 for the image. Please fix as soon as possible.", + "summary": "The issue reports that changing the language setting to Português (Brasil) on the specified OWASP page results in a missing image, leading to a 404 error. 
To resolve this, the missing image needs to be addressed to ensure it displays correctly in the Portuguese version.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-ten/issues/48", @@ -15590,10 +16120,11 @@ "pk": 547, "fields": { "nest_created_at": "2024-09-11T20:39:57.653Z", - "nest_updated_at": "2024-09-11T20:39:57.653Z", + "nest_updated_at": "2024-09-13T04:14:50.707Z", "node_id": "I_kwDODGfxcc5RqCR6", "title": "Informe sobre sistemas\nBy tkmx_nc_tkmx ", "body": "Un pequeño informe sobre las vulnerabilidades ,ataques y puertas traceras sobre un sistema ", + "summary": "The issue discusses the need for a brief report on vulnerabilities, attacks, and backdoors in a system. It suggests a focus on identifying and outlining these security concerns. To address this, it may be beneficial to research and compile relevant information on various types of vulnerabilities and potential threats to system integrity.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-top-ten/issues/50", @@ -15620,6 +16151,7 @@ "node_id": "I_kwDODGfvrc5khoNz", "title": "fix: merge review from @robvanderveer", "body": "the following is an initial review taken from Slack logs: https://owasp.slack.com/archives/C04PESBUWRZ/p1677192099712519\r\n\r\nby @robvanderveer\r\n\r\n\r\n---\r\nDear all,\r\nI did a first scan through the list to mainly look at taxonomy. Here are my remarks.\r\n1.\r\nML01\r\nIn 'literature' the term ‘adversarial’ is often used for input manipulation attacks, but also for data poisoning, model extraction etc. Therefore in order to avoid confusion it is probably better to rename the ML01 adversarial attack entry to input manipulation?\r\n2.\r\nIt is worth considering to add ‘model evasion’ aka black box input manipulation to your top 10? Or do you prefer to have one entry for input manipulation all together?\r\n3.\r\nML03\r\nIt is not clear to me how scenarios 1 and 2 work. I must be missing something. Usually model inversion is explained by manipulating synthesized faces until the algorithm behaves like it recognizes the face.\r\n4\r\nML04\r\nIt is not clear to me how scenario 1 works.\r\nStandard methods against overtraining are missing form the ‘how to prevent’ part. Instead the advice is to reduce the training set size - which typically increases the overfitting problem.\r\n5\r\nML05\r\nModel stealing describes a scenario where an attacker steals model parameters, but generally this attack takes place by ways of black box: gathering input-output pairs and training a new model on it.\r\n6\r\nML07\r\nI don’t understand exactly how the presented scenario should work. I do know about the scenario where a pre-trained model was obtained that has been altered by an attacker. This matches the description.\r\n7\r\nML08\r\nIsn’t model skewing the same as data poisoning? If there’s a difference, to me they are not apparent from the scenario and description.\r\n8\r\nML10 is called Neural net reprogramming but I guess the attack of changing parameters will work on any type of algorithm - not just neural networks. The description also mentions changing the training data, but perhaps that is better left out to avoid confusion with data poisoning?", + "summary": "The issue addresses a review of the taxonomy related to machine learning attack types. Key points for consideration include:\n\n1. Renaming the \"adversarial attack\" entry to \"input manipulation\" to reduce confusion.\n2. 
The potential addition of \"model evasion\" to the top 10 list.\n3. Clarification needed on specific scenarios related to model inversion and overfitting.\n4. The distinction between model stealing and black box attacks should be elaborated.\n5. Confusion over the definitions of model skewing and data poisoning.\n6. Reassessing the terminology used in ML10 to ensure it applies broadly across algorithms.\n\nTo resolve these points, a thorough review and clarification of the taxonomy entries, scenarios, and descriptions are necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/2", @@ -15652,6 +16184,7 @@ "node_id": "I_kwDODGfvrc5sUuDt", "title": "feat(docs): create page on calculating severity", "body": "Each of the Top 10 items are scored according to [OWASP's Risk Rating Methodology](https://owasp.org/www-community/OWASP_Risk_Rating_Methodology). There should be a page defining how to use the ratings to provide a severity score. This will assist practitioners in knowing 'what to fix' and 'when'.", + "summary": "The issue proposes creating a documentation page that explains how to calculate severity scores based on OWASP's Risk Rating Methodology for the Top 10 items. This resource aims to guide practitioners on prioritizing what needs to be fixed and when. To address this, a detailed page that outlines the rating process and its application should be developed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/15", @@ -15669,9 +16202,9 @@ 238 ], "labels": [ + 136, 134, - 135, - 136 + 135 ] } }, @@ -15684,6 +16217,7 @@ "node_id": "I_kwDODGfvrc5sc_TG", "title": "fix: merge existing body of work from EthicalML https://ethical.institute", "body": "### Type\n\nDocumentation Issue Report\n\n### What would you like to report?\n\nThere is a comprehensive existing body of work at: https://ethical.institute\r\n\r\nThe intent would be to review the [current Top 10 list in this project](https://owasp.org/www-project-machine-learning-security-top-10/) and:\r\n- merge content where appropriate\r\n- create new content missing \r\n- suggest improvements for areas/scope not curently covered\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct", + "summary": "The issue reports a need to merge existing documentation from EthicalML at https://ethical.institute with the current Top 10 list in the project linked. The tasks suggested include merging relevant content, creating new content where it is lacking, and proposing improvements for areas that are currently not covered.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/30", @@ -15698,12 +16232,12 @@ "author": 238, "repository": 981, "assignees": [ - 238, - 240 + 240, + 238 ], "labels": [ - 132, - 137 + 137, + 132 ] } }, @@ -15716,6 +16250,7 @@ "node_id": "I_kwDODGfvrc5ucL4j", "title": "[Fortnightly] Working Group Meeting - 2023-Aug-17", "body": "* Date: [Thursday, August 17 at 0500 UTC](https://dateful.com/convert/utc?t=5am&d=2023-08-02)\r\n* Previous agenda: #42 \r\n\r\n## Current agenda\r\n\r\n1. General project status - [v0.2 milestone complete](https://github.com/OWASP/www-project-machine-learning-security-top-10/releases/tag/v0.2)\r\n2. 
Contributions and [current help wanted](https://github.com/OWASP/www-project-machine-learning-security-top-10/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22)\r\n3. Introductions (for new contributors)\r\n\r\n## Discussions\r\n\r\n* [Join the OWASP Slack group](https://owasp.org/slack/invite) and the [#project-mlsec-top-10 channel](https://owasp.slack.com/archives/C04PESBUWRZ)\r\n* [Github Discussions](https://github.com/OWASP/www-project-machine-learning-security-top-10/discussions)\r\n\r\n## Calendar Event\r\n[Download calendar event (ICS)](https://calendar.google.com/calendar/ical/c_f818ec1e3dea1d4c80cb0f872566eccb82c5df9cc1161f3077f93eafc47889dc%40group.calendar.google.com/public/basic.ics)", + "summary": "A working group meeting is scheduled for August 17 at 0500 UTC. The agenda includes updates on the project's general status, confirming the completion of the v0.2 milestone, discussing current contributions and help needed, and introducing new contributors. Participants are encouraged to join the OWASP Slack group and the relevant channel, as well as engage in GitHub Discussions. To stay organized, a calendar event is available for download. Further action includes reviewing the contributions and help wanted sections to identify areas for involvement.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/63", @@ -15746,6 +16281,7 @@ "node_id": "I_kwDODGfvrc5vN6es", "title": "feat(rendering): make PDF output from Markdown files more presentable", "body": "The Top 10 list is being rendered using Markdown at https://mltop10.info\r\n\r\nThe site is being rendered using [Quarto](https://quarto.org) and the files from https://github.com/OWASP/www-project-machine-learning-security-top-10/tree/master/docs are mirrored to https://github.com/mltop10-info/mltop10.info\r\n\r\nCurrently a manual process is run for the https://github.com/mltop10-info/mltop10.info locally to render the HTML and PDF outputs which are stored in https://github.com/mltop10-info/mltop10.info/tree/main/docs and used by Github Pages.\r\n\r\nThe rendering for PDF is currently using the default method of LaTeX - example at: https://github.com/mltop10-info/mltop10.info/blob/main/docs/OWASP-Machine-Learning-Security-Top-10.pdf\r\n\r\nQuarto has a lot of formatting options for generating PDF and this needs to be explored to make the PDF and ePUB formats look more presentable.", + "summary": "The issue highlights the need to enhance the presentation of PDF outputs generated from Markdown files for a website that lists the Top 10 machine learning security concerns. The current PDF rendering relies on LaTeX's default settings, which are not optimal. It is suggested to explore Quarto's various formatting options to improve the appearance of the PDF and ePUB formats. This involves reviewing and possibly updating the rendering process to utilize these formatting capabilities more effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/99", @@ -15763,9 +16299,9 @@ 238 ], "labels": [ + 136, 134, - 135, - 136 + 135 ] } }, @@ -15778,6 +16314,7 @@ "node_id": "I_kwDODGfvrc5vvtvV", "title": "[Fortnightly] Working Group Meeting - 2023-Aug-31", "body": "## Current agenda\r\n\r\n1. General project status [v0.3 - in progress](https://github.com/OWASP/www-project-machine-learning-security-top-10/milestone/3)\r\n2. 
Contributions and [current help wanted](https://github.com/OWASP/www-project-machine-learning-security-top-10/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22)\r\n3. Introductions (for new contributors)\r\n\r\n## Discussions\r\n\r\n* [Join the OWASP Slack group](https://owasp.org/slack/invite) and the [#project-mlsec-top-10 channel](https://owasp.slack.com/archives/C04PESBUWRZ)\r\n* [Github Discussions](https://github.com/OWASP/www-project-machine-learning-security-top-10/discussions)\r\n\r\n## Calendar Event\r\n[Download calendar event (ICS)](https://calendar.google.com/calendar/ical/c_f818ec1e3dea1d4c80cb0f872566eccb82c5df9cc1161f3077f93eafc47889dc%40group.calendar.google.com/public/basic.ics)", + "summary": "The GitHub issue outlines the agenda for the upcoming working group meeting scheduled for August 31, 2023. Key points include the current status of the project (version 0.3), ongoing contributions, and a welcome for new contributors. Additionally, it encourages joining the OWASP Slack group and engaging in GitHub Discussions. To prepare for the meeting, contributors should check for open issues labeled \"help wanted\" and consider participating in the discussions to enhance collaboration.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/102", @@ -15808,6 +16345,7 @@ "node_id": "I_kwDODGfvrc5wjpZK", "title": "Model stealing through interaction is not mentioned", "body": "The current model stealing only describes the model being stolen through parameters, but the model can also be stolen by presenting inputs, capturing the output and using those combinations to train your own model. See AI guide", + "summary": "The issue highlights a gap in the documentation regarding model stealing techniques. It points out that the existing description focuses solely on parameter theft, neglecting the method of stealing a model by interacting with it—specifically by submitting inputs and capturing outputs to train a new model. To address this, the documentation should be updated to include information on this interaction-based model stealing approach.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/113", @@ -15838,6 +16376,7 @@ "node_id": "I_kwDODGfvrc5w8-kb", "title": "[Fortnightly] Working Group Meeting - 2023-Sep-14", "body": "## Current agenda\r\n\r\n1. General project status [v0.3 - in progress](https://github.com/OWASP/www-project-machine-learning-security-top-10/milestone/3)\r\n2. Notable PRs completed since last meeting:\r\n - #104 \r\n - #110 \r\n3. Notable discussions:\r\n - #107\r\n - #108\r\n - #109\r\n 4. Meetings:\r\n - WG meeting will change forward a few hours to accomodate EU morning time zones\r\n5. Contributions and [current help wanted](https://github.com/OWASP/www-project-machine-learning-security-top-10/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22)\r\n6. 
Introductions (for new contributors)\r\n\r\n## Discussions\r\n\r\n* [Join the OWASP Slack group](https://owasp.org/slack/invite) and the [#project-mlsec-top-10 channel](https://owasp.slack.com/archives/C04PESBUWRZ)\r\n* [Github Discussions](https://github.com/OWASP/www-project-machine-learning-security-top-10/discussions)\r\n\r\n## Calendar Event\r\n[Download calendar event (ICS)](https://calendar.google.com/calendar/ical/c_f818ec1e3dea1d4c80cb0f872566eccb82c5df9cc1161f3077f93eafc47889dc%40group.calendar.google.com/public/basic.ics)", + "summary": "The issue discusses the agenda for a working group meeting, highlighting the current project status (v0.3 in progress), notable pull requests completed since the last meeting, and key discussions. It mentions the scheduling of future meetings to accommodate EU time zones and encourages contributions, with a link to current help wanted issues. New contributors are welcomed, and it promotes joining the OWASP Slack group and participating in GitHub Discussions. \n\nTo move forward, participants should review the project status and notable discussions, and consider contributing to the identified open issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/114", @@ -15869,6 +16408,7 @@ "node_id": "I_kwDODGfvrc51QqDw", "title": "feat(docs): create guide for how to use Top 10 list as a ML Engineer", "body": "Reference https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/GUIDELINES.md#ml-engineeranalyst \r\n\r\n- [ ] Create a detailed guidelines document for how to use the information in the Top 10 list for use day to day", + "summary": "The issue suggests creating a comprehensive guidelines document for Machine Learning Engineers on how to effectively utilize the Top 10 list in their daily work. This document should draw references from existing materials and provide clear instructions or best practices. The task involves outlining practical applications of the Top 10 list in the context of machine learning security.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/130", @@ -15884,9 +16424,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 139 + 139, + 135 ] } }, @@ -15899,6 +16439,7 @@ "node_id": "I_kwDODGfvrc51QrtJ", "title": "feat(docs): create guide for how to use Top 10 list as a AppSec Engineer", "body": "Reference https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/GUIDELINES.md#pentestersecurity-engineer\r\n\r\n- [ ] Create a detailed guidelines document for how to use the information in the Top 10 list for use day to day", + "summary": "The issue requests the creation of a detailed guidelines document aimed at assisting AppSec Engineers in utilizing the Top 10 list effectively in their daily work. It references existing documentation for context. 
The task involves outlining practical applications and recommendations based on the Top 10 list.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/131", @@ -15914,9 +16455,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 139 + 139, + 135 ] } }, @@ -15929,6 +16470,7 @@ "node_id": "I_kwDODGfvrc51QsL5", "title": "feat(docs): create guide for how to use Top 10 list as a CISO", "body": "Reference https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/GUIDELINES.md#ciso\r\n\r\n- [ ] Create a detailed guidelines document for how to use the information in the Top 10 list for use day to day", + "summary": "The issue proposes the creation of a detailed guidelines document aimed at helping users understand how to effectively utilize the Top 10 list in their daily activities as a Chief Information Security Officer (CISO). It references existing guidelines and emphasizes the need for a comprehensive resource. To address this, the task involves developing a structured document that outlines practical applications of the Top 10 list in the CISO's workflow.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/132", @@ -15944,9 +16486,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 139 + 139, + 135 ] } }, @@ -15959,6 +16501,7 @@ "node_id": "I_kwDODGfvrc51QsbP", "title": "feat(docs): create guide for how to use Top 10 list as a Developer", "body": "Reference https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/GUIDELINES.md#developers\r\n\r\n- [ ] Create a detailed guidelines document for how to use the information in the Top 10 list for use day to day", + "summary": "The issue proposes the creation of a detailed guidelines document to help developers effectively utilize the Top 10 list. It references existing guidelines and emphasizes the need for practical, daily use instructions. The task involves drafting comprehensive documentation that outlines this usage.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/133", @@ -15974,9 +16517,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 139 + 139, + 135 ] } }, @@ -15989,6 +16532,7 @@ "node_id": "I_kwDODGfvrc51Qswv", "title": "feat(docs): create guide for how to use Top 10 list as an MLOps Engineer", "body": "Reference https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/GUIDELINES.md#mlops\r\n\r\n- [ ] Create a detailed guidelines document for how to use the information in the Top 10 list for use day to day", + "summary": "The issue requests the creation of a comprehensive guidelines document aimed at MLOps Engineers, detailing how to effectively utilize the Top 10 list in their daily operations. It references existing documentation for context. 
To address this, a structured and informative guide needs to be developed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/134", @@ -16004,9 +16548,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 139 + 139, + 135 ] } }, @@ -16019,6 +16563,7 @@ "node_id": "I_kwDODGfvrc51QtFx", "title": "feat(docs): create guide for how to use Top 10 list as a Data Engineer", "body": "Reference https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/GUIDELINES.md#data-engineer\r\n\r\n- [ ] Create a detailed guidelines document for how to use the information in the Top 10 list for use day to day", + "summary": "The issue requests the creation of a comprehensive guidelines document aimed at Data Engineers, detailing how to effectively utilize the information provided in the Top 10 list. The guidelines should enhance daily operations and integrate the insights into routine practices. To address this, a structured document outlining practical applications and best practices is needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/135", @@ -16034,9 +16579,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 139 + 139, + 135 ] } }, @@ -16049,6 +16594,7 @@ "node_id": "I_kwDODGfvrc51QyzM", "title": "feat(docs): create a recorded demo of ML01 Input Manipulation Attack", "body": "- [ ] Create a recorded video demo (no audio)\r\n\r\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)\r\n", + "summary": "A request has been made to create a recorded video demonstration of the ML01 Input Manipulation Attack, which should be without audio. The completed video will be uploaded to the OWASP YouTube Channel. To proceed, the video needs to be recorded and prepared for upload.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/136", @@ -16066,9 +16612,9 @@ 239 ], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16081,6 +16627,7 @@ "node_id": "I_kwDODGfvrc51Qz2Z", "title": "feat(docs): create a recorded demo of ML02 Data Poisoning Attack", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "A request has been made to create a recorded video demo of the ML02 Data Poisoning Attack, which should not include audio. The finished video will be uploaded to the OWASP YouTube Channel. To move forward, the demo needs to be recorded and prepared for upload.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/137", @@ -16096,9 +16643,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16111,6 +16658,7 @@ "node_id": "I_kwDODGfvrc51Q0B7", "title": "feat(docs): create a recorded demo of ML03 Model Inversion Attack", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "The issue requests the creation of a recorded video demo showcasing the ML03 Model Inversion Attack, with the stipulation that the video should not include audio. The completed video will be uploaded to the OWASP YouTube Channel. 
To address this issue, the demo needs to be recorded and edited accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/138", @@ -16126,9 +16674,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16141,6 +16689,7 @@ "node_id": "I_kwDODGfvrc51Q0I_", "title": "feat(docs): create a recorded demo of ML04 Membership Inference Attack", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "The issue requests the creation of a recorded video demo showcasing the ML04 Membership Inference Attack, specifically noting that the video should not include audio. The completed video will be uploaded to the OWASP YouTube Channel. To address this, a video demonstration needs to be produced and recorded.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/139", @@ -16156,9 +16705,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16171,6 +16720,7 @@ "node_id": "I_kwDODGfvrc51Q0N7", "title": "feat(docs): create a recorded demo of ML05 Model Theft", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "The issue requests the creation of a recorded video demo showcasing the ML05 Model Theft feature. The video should be without audio and will be uploaded to the OWASP YouTube Channel. To address this issue, a video demonstrating the feature needs to be produced.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/140", @@ -16186,9 +16736,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16201,6 +16751,7 @@ "node_id": "I_kwDODGfvrc51Q0Sr", "title": "feat(docs): create a recorded demo of ML06 AI Supply Chain Attacks", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "The issue requests the creation of a recorded video demo showcasing ML06 AI Supply Chain Attacks, with the stipulation that no audio is required. The completed video will be uploaded to the OWASP YouTube Channel. To address this, a video recording should be planned and executed accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/141", @@ -16218,9 +16769,9 @@ 241 ], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16233,6 +16784,7 @@ "node_id": "I_kwDODGfvrc51Q0X6", "title": "feat(docs): create a recorded demo of ML07 Transfer Learning Attack", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "The issue requests the creation of a recorded video demo showcasing the ML07 Transfer Learning Attack. The video should not include audio and is intended to be uploaded to the OWASP YouTube Channel. 
To address this, a video demonstrating the attack needs to be produced and prepared for upload.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/142", @@ -16250,9 +16802,9 @@ 242 ], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16265,6 +16817,7 @@ "node_id": "I_kwDODGfvrc51Q0gj", "title": "feat(docs): create a recorded demo of ML08 Model Skewing", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "The issue suggests creating a recorded video demo showcasing the ML08 Model Skewing topic, with the requirement that the video should not include audio. Once completed, the video is intended to be uploaded to the OWASP YouTube Channel. Action needs to be taken to produce and finalize the demo.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/143", @@ -16282,9 +16835,9 @@ 239 ], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16297,6 +16850,7 @@ "node_id": "I_kwDODGfvrc51Q0l5", "title": "feat(docs): create a recorded demo of ML09 Output Integrity Attack", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "The issue requests the creation of a recorded video demo showcasing the ML09 Output Integrity Attack, specifically noting that the video should have no audio. Once completed, the video is intended to be uploaded to the OWASP YouTube Channel. To proceed, a video demonstration needs to be recorded and prepared for upload.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/144", @@ -16312,9 +16866,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16327,6 +16881,7 @@ "node_id": "I_kwDODGfvrc51Q0p-", "title": "feat(docs): create a recorded demo of ML10 Model Poisoning", "body": "- [ ] Create a recorded video demo (no audio)\n\nVideo will be uploaded to [OWASP Youtube Channel](https://www.youtube.com/@owasp-mltop10)", + "summary": "The issue requests the creation of a recorded video demo showcasing the ML10 Model Poisoning topic, specifying that the video should not include audio. Once completed, the video will be uploaded to the OWASP YouTube Channel. 
To address this issue, a video recording demonstrating the concept should be produced.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/145", @@ -16342,9 +16897,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 140 + 140, + 135 ] } }, @@ -16357,6 +16912,7 @@ "node_id": "I_kwDODGfvrc51Q5WF", "title": "feat(docs): create a cheatsheet for ML01 Input Manipulation Attacks ", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\r\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\r\n- [ ] Is there a need for a new cheatsheet topic?\r\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\r\n\r\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\r\n\r\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)\r\n\r\n", + "summary": "The issue discusses the need for a cheatsheet focused on ML01 Input Manipulation Attacks. It outlines several tasks: checking for existing cheatsheets on the OWASP site, determining if they require updates for machine learning applications, assessing the necessity for a new cheatsheet topic, and potentially adding the new or updated cheatsheet as a reference in the Top 10 risk document. To move forward, it would be essential to review the current resources and identify any gaps related to input manipulation in the context of machine learning.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/147", @@ -16371,14 +16927,14 @@ "author": 238, "repository": 981, "assignees": [ + 243, 238, - 239, - 243 + 239 ], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16391,6 +16947,7 @@ "node_id": "I_kwDODGfvrc51RcoQ", "title": "feat(docs): create a cheatsheet for ML02 Data Poisoning Attack ", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue discusses the creation of a cheatsheet specifically for the ML02 Data Poisoning Attack. It outlines several tasks: checking for existing cheatsheets on the OWASP platform, assessing whether any need updates to include machine learning use cases, determining if a new cheatsheet topic is necessary, and referencing the existing or new cheatsheet in the Top 10 risk document. 
To proceed, review the current OWASP cheatsheets and identify any gaps related to data poisoning attacks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/150", @@ -16409,9 +16966,9 @@ 239 ], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16424,6 +16981,7 @@ "node_id": "I_kwDODGfvrc51Rc0V", "title": "feat(docs): create a cheatsheet for ML03 Model Inversion Attack", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue discusses the need to create a cheatsheet for the ML03 Model Inversion Attack. Several tasks have been outlined, including checking for existing cheatsheets on the OWASP website, determining if any existing cheatsheets require updates for machine learning contexts, assessing the necessity for a new cheatsheet topic, and adding relevant cheatsheets to the Top 10 risk document. \n\nTo proceed, one should first review existing resources and identify gaps that the new cheatsheet could fill.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/151", @@ -16439,9 +16997,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16454,6 +17012,7 @@ "node_id": "I_kwDODGfvrc51Rc9t", "title": "feat(docs): create a cheatsheet for ML04 Membership Inference Attack", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue discusses the creation of a cheatsheet specifically for the ML04 Membership Inference Attack. Key tasks include checking for existing cheatsheets on the OWASP site, determining if any need updates to address machine learning scenarios, assessing the necessity for a new cheatsheet topic, and referencing the relevant cheatsheet in the Top 10 risk document. 
To move forward, one should begin by reviewing existing resources and identifying gaps related to membership inference attacks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/152", @@ -16469,9 +17028,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16484,6 +17043,7 @@ "node_id": "I_kwDODGfvrc51RdB5", "title": "feat(docs): create a cheatsheet for ML05 Model Theft", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue suggests creating a cheatsheet specifically for ML05 Model Theft. It involves checking if there are existing cheatsheets on the OWASP Cheatsheets site and determining whether they require updates for machine learning contexts. Additionally, it explores the necessity of a new cheatsheet topic and proposes adding either the existing or new cheatsheet as a reference to the Top 10 risk document. To proceed, it is essential to evaluate current resources and identify any gaps related to model theft in machine learning.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/153", @@ -16499,9 +17059,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16514,6 +17074,7 @@ "node_id": "I_kwDODGfvrc51RdHk", "title": "feat(docs): create a cheatsheet for ML06 AI Supply Chain Attacks", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue proposes the creation of a cheatsheet specifically for ML06 AI Supply Chain Attacks. It includes several tasks: checking for existing cheatsheets on the OWASP Cheatsheets site, determining if any need updates for machine learning contexts, assessing the necessity for a new cheatsheet topic, and linking the relevant cheatsheet to the Top 10 risk document. 
To proceed, one should start by reviewing the current resources and identifying gaps that need to be addressed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/154", @@ -16528,14 +17089,14 @@ "author": 238, "repository": 981, "assignees": [ + 241, 238, - 239, - 241 + 239 ], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16548,6 +17109,7 @@ "node_id": "I_kwDODGfvrc51RdMG", "title": "feat(docs): create a cheatsheet for ML07 Transfer Learning Attack", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue discusses the need to create a cheatsheet for the ML07 Transfer Learning Attack. It raises several points for consideration: checking for existing cheatsheets on the OWASP site, determining if any existing cheatsheets require updates for machine learning applications, assessing the necessity for a new cheatsheet topic, and including the new or updated cheatsheet as a reference in the Top 10 risk document. \n\nTo move forward, it would be helpful to review the current cheatsheets and identify any gaps related to transfer learning attacks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/155", @@ -16562,14 +17124,14 @@ "author": 238, "repository": 981, "assignees": [ - 238, - 239, 242, - 244 + 244, + 238, + 239 ], "labels": [ - 135, - 141 + 141, + 135 ] } }, @@ -16582,6 +17144,7 @@ "node_id": "I_kwDODGfvrc51RdSW", "title": "feat(docs): create a cheatsheet for ML08 Model Skewing", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue proposes the creation of a cheatsheet specifically for ML08 Model Skewing. It outlines several tasks, including checking for existing cheatsheets on the OWASP Cheatsheets site, determining if any need updates to address machine learning scenarios, and assessing the necessity of a new cheatsheet topic. Additionally, it suggests adding either the existing or new cheatsheet to the Top 10 risk document. 
To proceed, a review of current resources and potential updates or additions is needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/156", @@ -16597,9 +17160,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16612,6 +17175,7 @@ "node_id": "I_kwDODGfvrc51RdYf", "title": "feat(docs): create a cheatsheet for ML09 Output Integrity Attack", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue discusses the creation of a cheatsheet for the ML09 Output Integrity Attack. It outlines several tasks, including checking for existing cheatsheets on the OWASP site, determining if they require updates for machine learning contexts, assessing the need for a new cheatsheet, and adding any relevant cheatsheet to the Top 10 risk document. It references examples of existing cheatsheets and how they are linked to the Top 10 risks. The next steps involve conducting the checks and updates as outlined.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/157", @@ -16627,9 +17191,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16642,6 +17206,7 @@ "node_id": "I_kwDODGfvrc51RddC", "title": "feat(docs): create a cheatsheet for ML10 Model Poisoning", "body": "- [ ] Is there existing cheatsheets at [OWASP Cheatsheets](https://cheatsheetseries.owasp.org/Glossary.html)\n- [ ] If there is an existing cheatsheet, does it need updating at the source to cater for machine learning use cases?\n- [ ] Is there a need for a new cheatsheet topic?\n- [ ] Add existing or new cheatsheet as a reference to the Top 10 risk document\n\nExample Cheatsheet: [Input Validation Cheatsheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Input_Validation_Cheat_Sheet.md)\n\nExample of Top 10 risk referencing cheatsheets: [ML01 Input Manipulation Attacks - Cheatsheets](https://github.com/OWASP/www-project-machine-learning-security-top-10/blob/master/docs/cheatsheets/ML01_2023-Input_Manipulation_Attack-Cheatsheet.md)", + "summary": "The issue discusses the creation of a cheatsheet for ML10 Model Poisoning. Key tasks include checking for existing cheatsheets on the OWASP website, determining if any need updates to address machine learning use cases, assessing the necessity for a new cheatsheet topic, and incorporating either an existing or a new cheatsheet as a reference in the Top 10 risk document. 
It may be beneficial to explore related examples for guidance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/158", @@ -16657,9 +17222,9 @@ "repository": 981, "assignees": [], "labels": [ - 135, 136, - 141 + 141, + 135 ] } }, @@ -16672,6 +17237,7 @@ "node_id": "I_kwDODGfvrc51Rx2I", "title": "chore(admin): assign owner(s) for ML01 Input Validation Attack", "body": "- [x] Assigned Lead Contributor for ML01\n- [x] Update CODEOWNERS with contributor details\n\nIdeally the Lead Contributor for ML01 will also be assigned to the cheatsheet - ref: #147 ", + "summary": "The issue involves assigning an owner for the ML01 Input Validation Attack and updating the CODEOWNERS file with the relevant contributor details. It has been noted that the Lead Contributor for ML01 should also be linked to the associated cheatsheet, as referenced in a previous issue. To complete the task, ensure that the Lead Contributor is assigned appropriately and that the CODEOWNERS file reflects these changes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/163", @@ -16703,6 +17269,7 @@ "node_id": "I_kwDODGfvrc51RzAU", "title": "chore(admin): assign owner(s) for ML03 Model Inversion Attack", "body": "- [ ] Assigned Lead Contributor for ML03\n- [ ] Update CODEOWNERS with contributor details\n\nIdeally the Lead Contributor for ML03 will also be assigned to the cheatsheet - ref: #151 ", + "summary": "The issue involves assigning a lead contributor for the ML03 Model Inversion Attack and updating the CODEOWNERS file with the contributor's details. Additionally, it is suggested that the lead contributor for ML03 should also be assigned to the associated cheatsheet referenced in another issue. The tasks to be completed include identifying and assigning the lead contributor and making the necessary updates to the CODEOWNERS file.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/165", @@ -16732,6 +17299,7 @@ "node_id": "I_kwDODGfvrc51RzRE", "title": "chore(admin): assign owner(s) for ML04 Membership Inference Attack", "body": "- [ ] Assigned Lead Contributor for ML04\n- [ ] Update CODEOWNERS with contributor details\n\nIdeally the Lead Contributor for ML04 will also be assigned to the cheatsheet - ref: #152 ", + "summary": "The issue involves assigning a Lead Contributor for the ML04 Membership Inference Attack and updating the CODEOWNERS file with the contributor's details. It is suggested that the Lead Contributor for ML04 should also be designated for the associated cheatsheet. Action items include identifying and assigning the Lead Contributor and making the necessary updates to the CODEOWNERS file.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/166", @@ -16761,6 +17329,7 @@ "node_id": "I_kwDODGfvrc51Rzdy", "title": "chore(admin): assign owner(s) for ML05 Model Theft", "body": "- [ ] Assigned Lead Contributor for ML05\n- [ ] Update CODEOWNERS with contributor details\n\nIdeally the Lead Contributor for ML05 will also be assigned to the cheatsheet - ref: #153 ", + "summary": "The issue involves assigning a Lead Contributor for the ML05 Model Theft project and updating the CODEOWNERS file to include the contributor's details. Additionally, it suggests that the Lead Contributor should also be assigned to the related cheatsheet. 
To resolve this, the necessary assignments and updates should be made.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/167", @@ -16790,6 +17359,7 @@ "node_id": "I_kwDODGfvrc51R0Mh", "title": "chore(admin): assign owner(s) for ML08 Model Skewing", "body": "- [ ] Assigned Lead Contributor for ML08\n- [ ] Update CODEOWNERS with contributor details\n\nIdeally the Lead Contributor for ML08 will also be assigned to the cheatsheet - ref: #156 ", + "summary": "The issue involves assigning a Lead Contributor for the ML08 Model Skewing and updating the CODEOWNERS file with the contributor's details. Additionally, it suggests that the Lead Contributor should also be involved with the cheatsheet referenced in another issue. To address this, the assigned contributor needs to be determined and the necessary updates made to the CODEOWNERS file.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/170", @@ -16819,6 +17389,7 @@ "node_id": "I_kwDODGfvrc51R0Zy", "title": "chore(admin): assign owner(s) for ML09 Output Integrity Attack", "body": "- [ ] Assigned Lead Contributor for ML09\n- [ ] Update CODEOWNERS with contributor details\n\nIdeally the Lead Contributor for ML09 will also be assigned to the cheatsheet - ref: #157 ", + "summary": "The issue highlights the need to assign a Lead Contributor for the ML09 Output Integrity Attack and to update the CODEOWNERS file with the contributor's details. Additionally, it suggests that the Lead Contributor should also be linked to the cheatsheet referenced in another issue. Action items include assigning a contributor and updating relevant documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/171", @@ -16848,6 +17419,7 @@ "node_id": "I_kwDODGfvrc51R0sl", "title": "chore(admin): assign owner(s) for ML10 Model Poisoning", "body": "- [ ] Assigned Lead Contributor for ML10\n- [ ] Update CODEOWNERS with contributor details\n\nIdeally the Lead Contributor for ML10 will also be assigned to the cheatsheet - ref: #158 ", + "summary": "The issue discusses the need to assign a Lead Contributor for the ML10 Model Poisoning project and to update the CODEOWNERS file with the contributor's details. Additionally, it suggests that the Lead Contributor should also be assigned to the related cheatsheet as referenced in another issue. To resolve this, the necessary assignments should be made and the CODEOWNERS file updated accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/172", @@ -16877,6 +17449,7 @@ "node_id": "I_kwDODGfvrc51eUcp", "title": "feat(docs): create a GLOSSARY page of commonly used terms", "body": "Machine Learning has a lot of terminology that may be new to people. Need to create a glossary page of commonly used terms.", + "summary": "A request has been made to create a glossary page that defines commonly used terms in Machine Learning, as the existing terminology may be unfamiliar to many users. 
To address this, a glossary with clear definitions of key terms should be developed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/178", @@ -16894,8 +17467,8 @@ 244 ], "labels": [ - 134, - 136 + 136, + 134 ] } }, @@ -16908,6 +17481,7 @@ "node_id": "I_kwDODGfvrc51e28g", "title": "fix: merge review from @harrietf", "body": "[Harriet Farlow](https://www.linkedin.com/in/harriet-farlow-654963b7/) sent through her feedback via mail.\r\n\r\nUploading Word doc and also outputting to Markdown in this issue to track.\r\n\r\nFeedback in Word doc: [OWASP Top Ten Feedback.docx](https://github.com/OWASP/www-project-machine-learning-security-top-10/files/13219930/OWASP.Top.Ten.Feedback.docx)\r\n\r\n\r\n\r\nOutput in Markdown from Word doc below:\r\n---\r\n**General feedback - the list and the home page**\r\n\r\nI think this is great! Some feedback below:\r\n\r\n- I would also add something about these kinds of mitigations being\r\n risk-based, and that each organisation should make a risk-based\r\n assessment of which specific attacks are more likely to be employed\r\n against their models, how impactful the ramifications would be (ie.\r\n would they lose massive amounts of PII or would someone just get a\r\n bad customer experience) and then decide on their ML security\r\n posture from there.\r\n\r\n- I would also highlight some of the reasons why ML systems are\r\n different to traditional cyber systems (ie. ML systems are\r\n probabilistic while cyber systems are rules based, ML systems learn\r\n and evolve while cyber systems are more static). From an\r\n organisational perspective this means that it is unfair to expect\r\n cyber security professionals to automatically know how best to\r\n secure ML systems, and that there should be investment in training\r\n and/or the creation of new roles\r\n\r\n- I would also add a comment about the terminology here being ML\r\n security vs AI security. Clarify the difference between ML and AI\r\n (ie. AI is a broader set of technologies while ML refers\r\n specifically to the AI sub-field of ML models). For example, I\r\n usually refer to my work as AI Security to be more encompassing of\r\n an audience who is used to hearing about AI, but most of what I talk\r\n about is actually ML security. Adding something here about these\r\n terms would be useful for non-ML folk.\r\n\r\n**Some questions before I give feedback on these things**\r\n\r\n- Is the intention of the top 10 list that it is also ordered so that\r\n #1 is most threatening, or is it unordered?\r\n\r\n- What is the methodology/reference for the risk factor values under\r\n each threat? (I know it comes from the specific scenarios, but is it\r\n meant to also be representative of 'common' ways of implementing the\r\n attack? Because it could be very different and might be interpreted\r\n as generic)\r\n\r\n**ML01:2023 Input Manipulation Attack**\r\n\r\n- Where Adversarial Attacks are referenced in the intro, I usually see\r\n this referred to as an Adversarial Example instead and this language\r\n would be more consistent. I'm also not sure what it means by saying\r\n it's an umbrella term, what are the other attacks underneath this\r\n umbrella?\r\n\r\n- The list Adversarial Training, Robust Models, Input Validation are\r\n all good to include but could be rephrased to be mutually exclusive\r\n (ie. the Robust Models section references both Adversarial Training\r\n and Input Validation as the way to make models robust). 
You could\r\n start with Robust Models first and still mention that those other\r\n two techniques are some ways to make model robust, but is also\r\n dependent on good data quality (clean data, good feature\r\n engineering, complete data), model selection (having to choose an\r\n appropriate balance between accuracy and interpretability, which are\r\n usually trade-offs), considering ensemble methods instead of a\r\n single model (ie. you get results from multiple models as a way of\r\n checking and validation output). You can then into system level\r\n security measures (input validation) and process-based security\r\n measures (incorporating adversarial training).\r\n\r\n- Love the scenarios, that really helps to put it in perspective.\r\n\r\n**ML02:2023 Data Poisoning Attack**\r\n\r\n- Would be worth including more here in the 'about' section around how\r\n this normally happens (an organisation's data repository could have\r\n new data added, or existing data altered), why it works (an ML\r\n model's heavy reliance on training data and the fact that it needs\r\n LOTS of it, which means it's also very hard to spot if it has been\r\n tampered with), who is likely to do this (someone either needs\r\n access to the data repository if the model is trained on internal\r\n data only in which case an insider threat scenario is more likely,\r\n or if the model learns from 'new' data fed from the outside like a\r\n chatbot that learns off inputs without validation (ie. Tay) then\r\n this is a much easier attack) etc. Also clarify how this is\r\n different to the cyber security challenge of just keeping data\r\n secure (ie. it's more about the statistical distribution of the data\r\n than the likelihood of data breach).\r\n\r\n- The 'how to prevent this section' is great, and adding more info\r\n beforehand might help make it clearer as to why these measures are\r\n important, and why the measure to secure data is for different\r\n reasons than traditional cyber security threat models.\r\n\r\n- Again, great examples. You could add something like a chatbot or\r\n social media model here to underscore how publicly injected data is\r\n also a risk here.\r\n\r\n**ML03:2023 Model Inversion Attack & ML04:2023 Membership Inference\r\nAttack**\r\n\r\n- These two attacks could be clarified as to exactly how they're\r\n different, and depending on how you expect someone might use the Top\r\n 10, you could consider combining them. For example, they both leak\r\n information about data used to train the model but model inversion\r\n is about leaking any data, and membership inference is more about\r\n seeing if a specific piece of data was used. Or you could clarify\r\n that they are separate based on the different controls used to\r\n prevent it (ie. is the focus on differentiation here the goal of\r\n attack, or the means of securing it)\r\n\r\n- Model inversion - you could explain what it means to\r\n 'reverse-engineer' in this context. Ie. it is more about targeted\r\n prompting than reverse engineering in a cyber context. This would\r\n also explain why the suggested controls (ie. input verification)\r\n work.\r\n\r\n**ML05:2023 Model Stealing**\r\n\r\n- This is good! Again, I think a longer description would be helpful.\r\n\r\n**ML06:2023 AI Supply Chain Attacks**\r\n\r\n- Yes, love that this is included!\r\n\r\n- Why are we switching from ML to AI here?\r\n\r\n**ML07:2023 Transfer Learning Attack**\r\n\r\n- This is interesting.. 
In my mind this is more an AI misuse issue\r\n than an AI security issue, but it really depends on the scenario I\r\n think. Could be worth clarifying here.\r\n\r\n**ML08:2023 Model Skewing**\r\n\r\n- This also seems the same as the data poisoning attack.. It's not\r\n clear how they're different. Usually when I saw model skewing the\r\n attack is based on being able to change the model outcome without\r\n actually touching the training data.. Would be worth clarifying\r\n here.\r\n\r\n**ML09:2023 Output Integrity Attack**\r\n\r\n- Again, this attack overlaps a lot with other attacks already listed\r\n that aim to impact the output, it's almost more of an umbrella term\r\n in my mind. Or it could be different but it depends on how an\r\n organisation creates its threat model. The controls mentioned here\r\n are good though, so it's worth clarifying why this is listed as a\r\n different attack and if it's based on the controls suggested (which\r\n would also apply to the other attacks listed).\r\n\r\n**ML10:2023 Model Poisoning**\r\n\r\n- Yes, this is good, and worth adding a bit more about how this can\r\n happen (ie. direct alteration of the model through injection, the\r\n interaction with hardware here and how this can be done in memory\r\n etc)\r\n\r\n", + "summary": "The issue discusses feedback received on a project related to ML security threats. The feedback covers various aspects, including the need for a risk-based approach to mitigations, differentiation between ML and AI security, and clarity on specific attacks. Detailed comments are provided on each of the top ten threats, suggesting improvements in terminology, descriptions, and explanations of concepts. \n\nKey points include:\n\n- Incorporating risk assessments for organizations regarding potential attacks.\n- Highlighting the differences between ML and traditional cyber systems.\n- Clarifying terminology used for ML and AI security.\n- Providing further details on specific attacks and their prevention methods.\n\nTo address the feedback, it may be necessary to revise the documentation with clearer definitions, additional context for the attacks, and an explanation of the methodology behind the rankings and risk factors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/182", @@ -16940,6 +17514,7 @@ "node_id": "I_kwDODGfvrc53FVcs", "title": "[FEEDBACK]: Sync attack names between LLMT10 and MLT10 where appropriate", "body": "### Type\n\nSuggestions for Improvement\n\n### What would you like to report?\n\nI would like to make the suggestion that we consolidate the terms used in the LLM and ML top 10 documents.\r\n\r\nMany of the top 10 items in each are closely related or even the same.\r\nWhere possible, the same term should be used (i.e. Model Theft vs Model Stealing, Data Poisoning Attack vs Training data Poisoning).\r\n\r\nThanks!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct", + "summary": "The issue suggests consolidating the terminology used in the LLM and ML top 10 documents, as many items are closely related or identical. 
It recommends using consistent terms, such as \"Model Theft\" instead of \"Model Stealing,\" and \"Data Poisoning Attack\" instead of \"Training Data Poisoning.\" The next steps would involve reviewing both documents to identify and align the related terms.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/187", @@ -16971,6 +17546,7 @@ "node_id": "I_kwDODGfvrc53Iuq0", "title": "[FEEDBACK]: Include MLOps vulnerabilties somewhere in the Supply Chain Security category ", "body": "### Type\r\n\r\nSuggestions for Improvement\r\n\r\n### What would you like to report?\r\n\r\n**Context**\r\nOne of the parts of the supply chain in modern ML systems is MLOps software - like i.e. MLFlow, Prefect etc. Those systems are vulnerable to classic web based attacks and they seem to be \"misconfured by default\". I've described it here: https://hackstery.com/2023/10/13/no-one-is-prefect-is-your-mlops-infrastructure-leaking-secrets/ or here: https://github.com/logspace-ai/langflow/issues/1145 \r\n\r\n**Suggestion for improvement**\r\nI'd suggest including MLOps-related vulnerabilities in the ML06 (or maybe in some other categories as well? I am open for suggestions). \r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's Code of Conduct", + "summary": "The issue raises a suggestion to include MLOps-related vulnerabilities within the Supply Chain Security category, highlighting that MLOps software, such as MLFlow and Prefect, is susceptible to common web-based attacks and often comes misconfigured by default. The author points to existing resources that elaborate on these vulnerabilities. The suggestion is to categorize these vulnerabilities appropriately, potentially under ML06 or other suitable categories. To address this, it may be necessary to assess current categories and determine where MLOps vulnerabilities can be integrated.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/188", @@ -16985,8 +17561,8 @@ "author": 241, "repository": 981, "assignees": [ - 238, - 241 + 241, + 238 ], "labels": [ 132, @@ -17003,6 +17579,7 @@ "node_id": "I_kwDODGfvrc53sTiJ", "title": "[FEEDBACK]: Include a page with a brief descriptions of each of the vulnerabilities", "body": "### Type\n\nSuggestions for Improvement\n\n### What would you like to report?\n\nFor example in Top10 for LLM there's this page with a summary of each of the vulnerabilities, which I think would be pretty useful to have in Top10 for ML as well. \r\n\r\nSometimes when you e.g. work on some slides for a presentation, you just want to get a short summary of each of the vulnerabilities. In my opinion including such a page in Top10 for ML would be an improvement: \r\n\r\n![summary](https://github.com/OWASP/www-project-machine-learning-security-top-10/assets/64902909/0bc2b1c8-43a0-4a5d-b549-71cf83e897c1)\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct", + "summary": "The issue suggests adding a page that provides brief descriptions of each vulnerability in the Top10 for Machine Learning (ML) project. This addition is seen as beneficial for users who may need quick summaries for presentations or other purposes, similar to a feature available in the Top10 for LLM. 
Implementing this suggestion could enhance the usability of the resource.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/189", @@ -17034,6 +17611,7 @@ "node_id": "I_kwDODGfvrc6B14iW", "title": "OWASP Top 10 ML Summaries", "body": "Ref: https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/189\r\n\r\nAs of now, do we need a markdown file for this or a powerpoint page which contains all the summaries for the project? \r\n\r\ncc: @shsingh @mik0w ", + "summary": "The issue discusses whether a markdown file or a PowerPoint slide should be created to compile all the summaries related to the OWASP Top 10 Machine Learning Security project. It is suggested that a decision needs to be made on the format for presenting this information. Consideration should be given to the best way to organize and share the summaries effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/205", @@ -17058,10 +17636,11 @@ "pk": 595, "fields": { "nest_created_at": "2024-09-11T20:42:41.693Z", - "nest_updated_at": "2024-09-11T20:42:41.693Z", + "nest_updated_at": "2024-09-13T16:07:00.058Z", "node_id": "I_kwDODGfvrc6LD2Xp", "title": "[FEEDBACK]: Description of ML04 Membership Inference Attack", "body": "### Type\n\nGeneral Feedback\n\n### What would you like to report?\n\nhttps://github.com/OWASP/www-project-machine-learning-security-top-10/blob/f1cf662ca9ce5cfcd4c72ab8d4bff91ea64f46d7/docs/ML04_2023-Membership_Inference_Attack.md?plain=1#L27\r\n\r\nHere, the documentation states that `an attacker manipulates the model’s training data`, but from my understanding the objective of a membership inference attack is to [\"\\[...\\] predict whether or not a particular example was contained in the model’s training dataset.\"](https://ieeexplore.ieee.org/document/9833649), so the attacker shouldn't have access to the training data.\r\n\r\nI can create a pull request to update the documentation. Let me know if you'd like me to proceed.\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct", + "summary": "The issue raises a concern regarding the description of a membership inference attack in the documentation. It points out that the current wording suggests the attacker manipulates the model's training data, which contradicts the fundamental nature of such attacks, where the attacker typically does not have access to the training data. The user is willing to create a pull request to clarify this point in the documentation if it is deemed necessary. 
\n\nTo address this, a review of the documentation's language is suggested to ensure it accurately reflects the mechanics of membership inference attacks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-machine-learning-security-top-10/issues/210", @@ -17079,8 +17658,8 @@ 238 ], "labels": [ - 132, - 143 + 143, + 132 ] } }, @@ -17089,10 +17668,11 @@ "pk": 596, "fields": { "nest_created_at": "2024-09-11T20:43:41.238Z", - "nest_updated_at": "2024-09-11T20:43:41.238Z", + "nest_updated_at": "2024-09-13T16:07:47.175Z", "node_id": "I_kwDODGftyc5feB1C", "title": "small grammar error", "body": "In the index.md, and in CNAS-1: Insecure cloud, container or orchestration configuration\r\nshould 'Imrpoper permissions' change to 'Improper permissions' ?", + "summary": "There is a small grammar error in the index.md file, specifically in the section titled \"CNAS-1: Insecure cloud, container or orchestration configuration.\" The term \"Imrpoper permissions\" should be corrected to \"Improper permissions.\" A fix is needed to update the text accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-cloud-native-application-security-top-10/issues/8", @@ -17115,10 +17695,11 @@ "pk": 597, "fields": { "nest_created_at": "2024-09-11T20:43:57.448Z", - "nest_updated_at": "2024-09-11T20:43:57.448Z", + "nest_updated_at": "2024-09-13T16:08:00.528Z", "node_id": "I_kwDODGftZc5fWt9s", "title": "SwSec 5D Survey", "body": "![SwSec 5D - Survey](https://user-images.githubusercontent.com/26579136/221373538-0a09d21d-1e81-47c7-b406-47b114221bac.jpg)\r\n", + "summary": "The issue discusses the SwSec 5D Survey and includes an image related to it. The main focus appears to be on gathering feedback or information regarding the survey. It may be beneficial to clarify the survey's objectives or instructions for participants to ensure effective responses.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-software-security-5d-framework/issues/1", @@ -17141,10 +17722,11 @@ "pk": 598, "fields": { "nest_created_at": "2024-09-11T20:44:07.768Z", - "nest_updated_at": "2024-09-11T20:44:07.768Z", + "nest_updated_at": "2024-09-13T16:08:09.039Z", "node_id": "MDU6SXNzdWU5NTIzODAyOTU=", "title": "Add a Matrix ", "body": "Add a matrix (likely an .xlsx file) that:\r\n\r\n* Can be used as a checklist and note-taking / task-tracking\r\n* Includes links from each task’s inputs and outputs to their corresponding tasks, as described in the OWASP-Vuln-MGM-Guide-Jul23-2020.pdf file", + "summary": "The issue suggests adding a matrix, probably in the form of an .xlsx file, that serves as a checklist and allows for note-taking or task tracking. It should also include links connecting each task's inputs and outputs to related tasks as outlined in the specified OWASP guide. 
To address this, one would need to create the matrix and ensure it aligns with the guidelines from the referenced document.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-vulnerability-management-guide/issues/1", @@ -17171,10 +17753,11 @@ "pk": 599, "fields": { "nest_created_at": "2024-09-11T20:44:19.189Z", - "nest_updated_at": "2024-09-11T20:44:19.189Z", + "nest_updated_at": "2024-09-13T16:08:18.147Z", "node_id": "I_kwDODGfszM5G7xDs", "title": "Attack Surface Detector plugin will allways start spider", "body": "After executing Attack Surface Detector plugin in Owasp Zap Proxy, it will try to spider the application even if the checkbox **Automatically start spider after importing endpoints** is unchecked. \r\n\r\nHere is a print showing the problem:\r\n![image](https://user-images.githubusercontent.com/952143/161310230-ae1ce899-278e-441e-b646-afda9411e089.png)\r\n", + "summary": "Investigate the Attack Surface Detector plugin behavior in OWASP ZAP Proxy. Confirm that the plugin starts the spider process regardless of the **Automatically start spider after importing endpoints** option being unchecked. \n\n1. Review the code responsible for the Attack Surface Detector plugin's configuration and execution flow.\n2. Identify where the spider initiation logic is implemented and check for any conditional checks tied to the checkbox state.\n3. Implement logging to capture the state of the checkbox when the plugin runs and the spider starts.\n4. Test the plugin with various configurations to reproduce the issue consistently.\n5. Propose a solution to ensure the spider only starts when the checkbox is checked. \n\nDocument findings and submit a pull request with the necessary changes once the root cause is identified.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-project-attack-surface-detector/issues/1", @@ -17201,6 +17784,7 @@ "node_id": "MDU6SXNzdWU2MDE3MzM5MDI=", "title": "Request for a bit more explanation", "body": "Congrats on the new release! Looks really useful!\r\nI do wonder about a few requirements (See below): can you help me understand what they mean? It would be great if the document can provide a bit more explanation on them!\r\n\r\n`2.8: SBOM is analyzed for risk` ; what type of risk? is that development risks ? (e.g. continuity due to often changing signature of the functions it provides?) or only security risk? Or project continuity risk (e.g. license removed, etc.)\r\n\r\n`2.11: SBOM contains metadata about the asset or software the SBOM describes `: what type of metadata are we talking about :) ?\r\n\r\n`3.9; Application build pipeline prohibits alteration of certificate trust stores` ; if you do infrastructure as code and use a 4-eyed/hardened/etc. gitlab configuring pipeline which triggers a certificate manager from the underlying infrastructure to update trust-stores... then that is a lot safer than doing it my hand ... 
Ofcourse the requirement says application build pipeline, but i wonder: is there a split between configuration pipeline/application pipeline/infra pipeline?\r\n\r\n` 4.6: Package repository supports security incident reporting` ; where should it report to?\r\n\r\n`4.18: Package manager does not execute code` : do you mean of the packages that it stores, because a lot of the functionality described is only available when you \"upgrade\"/\"install plugin X for manager Y\", etc.?\r\n\r\nI love the standard already; it helps explaining what people should expect from the component management side of things! Will you incorporate something like a verification guide and/or a compliancy list like at the MSTG does for the MASVS? Then it could be easily incorporated into existing standardization processes!", + "summary": "There is a request for clarification on several requirements related to a recent software release. The user seeks additional details on the following points:\n\n1. Requirement 2.8 regarding the types of risks analyzed in the SBOM (Software Bill of Materials), questioning whether it includes development, security, or project continuity risks.\n2. Requirement 2.11 asks for specifics on the type of metadata included in the SBOM.\n3. Requirement 3.9 raises concerns about the distinction between different types of pipelines (configuration, application, infrastructure) in relation to certificate trust stores.\n4. Requirement 4.6 inquires about the destination for security incident reporting.\n5. Requirement 4.18 seeks clarification on whether the restriction on executing code pertains to the packages stored in the manager.\n\nAdditionally, there is a suggestion to create a verification guide or compliance list similar to existing standards to enhance integration into standardization processes. A response addressing these points would be beneficial.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/9", @@ -17229,6 +17813,7 @@ "node_id": "MDU6SXNzdWU2MDMzNDE5MjM=", "title": "David A. Wheeler's comments", "body": "Comments on OWASP “Software Component Verification Standard” by David A. Wheeler\r\n\r\nHere are my comments on the “Software Component Verification Standard” Version 1.0.0-RC.1 (Public Preview), 16 April 2020, https://owasp-scvs.gitbook.io/scvs/ My apologies that’s it’s one long document; I wrote my comments & then discovered that comments were wanted via GitHub. Had I known that I would have split this up. But hopefully they’ll be useful anyway.\r\n\r\nThis document’s frontispiece says it’s a “grouping of controls, separated by domain, which can be used by architects, developers, security, legal, and compliance to define, build, and verify the integrity of their software supply chain.” So I’m commenting on the document based on that understanding. My high-level comments:\r\n\r\n1. Many requirements are far too vague. It’s impossible to properly review this document without knowing what is necessary to meet its requirements. For every requirement it should be clear how it could and could not be met. See my detailed comments before for many examples of this vagueness. In many cases a key issue is “how far down (transitively) does this go?” Similarly, how much provenance & pedigree information is enough? It appears that the world wide web plus Google would count as a package manager; I presume that was not intended. 
I recommend trying to rewrite the requirements assuming that a lazy or malicious contractor will re-interpret them in the lowest-cost or most evil way. You can’t really make requirements impossible to re-interpret that way, but that exercise will help turn these vague requirements into more precise requirements that can be reviewed and applied.\r\n2. It’s not clear some of these requirements are practical (again, primarily because they’re too vague to know what they mean). In some cases, depending on their interpretation, the only way they’ll be widely practical is if their implementations are embedded into OSS package managers as OSS. If OWASP intends to help implement some of these in OSS package managers, that could be great, but OWASP needs to avoid mandating impractical or excessively expensive requirements.\r\n3. Does this really apply to entire organizations? The preface suggests it, but in many cases this is probably impractical. The document should instead discuss it being applied to specific projects within an organization, or a specific project, not necessarily a whole organization. That way, organizations can grow into these requirements if they desire.\r\n4. Is this intended to apply to open source software (OSS) projects themselves? OSS projects themselves have supply chains, and their supply chains become the supply chains of their downstream users. I wanted to see if these requirements could be practically met by OSS projects. However, the vagueness of the requirements makes it impossible to determine if an OSS project could implement them. I think the requirements need to be revised to specifically ensure that OSS projects could meet the requirements. Imagine at least one way that an OSS project could implement them. If these requirements can’t be met by OSS projects, then this document is probably impractical, because those dependencies will become transitive requirements on all their users including practically all serious projects today. So again: please review these requirements with that specifically in mind (once the requirements are reworded enough so they can be analyzed).\r\n5. There is no identified threat model, nor any mapping to show that these controls are both adequate and the minimum necessary to counter those threats. What attacks are you trying to counter? Are there controls missing? Are some unnecessary?\r\n6. In some cases it’s not even obvious why anyone would want to meet the requirement as stated. Why would that help? In some cases that would help the vagueness, e.g., “Do XYZ so that will be detected/countered”, because that would help people determine if a particular implementation would meet the need, and it would also help people understand the risk of not meeting some requirement. It would especially help people decide when waivers are fine.\r\n7. The document’s title is extremely misleading & needs to be changed. This document is not about Software Component Verification, so don’t say that it is. Change the title to accurately reflect its comments, in particular that it focuses on supply chain, as I discuss in my specific comments.\r\n\r\nBelow are specific comments.\r\n\r\n=======================\r\n\r\n\r\nSpecific comments:\r\n\r\nTitle: The title is wrong or at least dreadfully misleading. The title needs to be changed to something that accurately reflects its contents. Currently the title says it’s the “Software Component Verification Standard” - yet this specification doesn’t cover software component verification. 
Per ISO/IEC 15288 and ISO/IEC 12207 the purpose of verification is to ensure that the system meets its requirements (including regulations and such). Yet nothing in this specification verifies that the component meets its requirements, so by definition this document doesn’t support (general) verification. It doesn’t even verify that a component meets its security requirements (never mind ALL its requirements). In addition, the “component analysis” section is inadequate for a serious security analysis; it doesn’t include many important measures to analyze the security of a component. The frontispiece also makes it clear that the title is wrong; the frontispiece says that this specification focuses on the “integrity of their software supply chain.” Since this document is actually focused on software security *supply chain* issues, and not software component verification, the title should be changed to reflect its actual purpose instead of its current misleading name. Please change the title to reflect the document’s actual purpose. An example would be “Software Component Supply Chain Integrity Verification Standard”; I’m sure there are many other possible names. Please ensure that the title clearly and accurately reflects its contents to readers who have not yet read it.\r\n\r\nIn chapter ”Assessment and Certification”:\r\n\r\nIt says, “The recommended way of verifying compliance of a software supply chain with SCVS is by performing an \"open book\" review, meaning that the auditors are granted access to key resources such as legal, procurement, build engineers, developers, repositories, documentation, and build environments with source code.” This text - specifically the term “auditors” - seems to presume that the specification will only be used for third-party audits. Yet I expect a major use of this document (if used) will be for organizations to determine, for themselves, if they meet the requirements. The frontispiece says that the document’s users include “architects, developers, security, legal,...” - but there isn’t any discussion on how THESE groups would use this document The use of this by organizations on themselves needs to be discussed somewhere. I recommend that somewhere before this section there be a discussion on how organizations can use this themselves in various roles, and THEN discuss certification, to make it clear that uses other than certification are supported.\r\n\r\nIn chapter “Using SCVS”:\r\n\r\nNowhere does this document explain V1 through V6, we just get dumped into this list in those sections. What are you calling them (domains? families?)? There should be a hint of that early in the document. I suggest as the first section of “Using SCVS” there be a subsection called “Control families” with text like this: “This specification identifies a set of 6 control families. Each family has an identifier (V1 through V6) and contains a number of specific controls (numbered V1.1, V1.2, and so on). These six families are inventory (V1), software bill of materials (V2), build environment (V3), package management (V4), component analysis (V5), and pedigree and provenance (V6).”\r\n\r\nIt’s claimed that it’s to “Develop a common taxonomy of activities, controls, and best-practices that can reduce risk in a software supply chain”. There’s no real taxonomy here, at least not in the meaning many would accept. 
State what you’re *actually* doing instead, don’t call it a taxonomy.\r\n\r\nIn chapter “V1 Inventory”\r\nChapters V1 and V2 make a big deal out of distinguishing “inventory” from “software bill of material” (SBOM) yet there seems to be no real difference and it certainly isn’t clearly explained. The term “inventory” is not defined in the glossary. From the text it appears that an SBOM is simply the inventory list. if so, why use two different words for basically the same thing? I’m guessing a difference was intended but the document doesn’t clearly state it. This concern affects everything that uses the term “inventory” and “SBOM”. As a result, I don’t know exactly what many of these requirements actually mean, making it hard to review & hard to apply.\r\nAdd underlined, as that’s another common way to identify something: “Component identification varies based on the ecosystem the component is part of. Therefore, for all inventory purposes, the use of identifiers such as Package URL or (name of common package manager repository + name of package within it) may be used to standardize and normalize naming conventions for managed dependencies.\r\nIn the text, “Having organizational inventory of all first-party, third-party, and open source components…” - that doesn’t make sense, OSS components are also first-party or third-party, Perhaps rewrite to “all first-party and third-party components, whether they are proprietary or open source software, …”\r\n\r\nRequirement V1.1: “All components and their versions are known at completion of a build” - does this include transitive dependencies? Does it include the underlying operating system & database (which may be tracked by a different system)? How about compile/build tools like compilers and test frameworks? It’s so brief that I have no idea what this requirement means.\r\n\r\nV1.2: Says, “Package managers are used to manage all third-party binary components” - what is meant by a package manager? If I have a project-local shell script that re-downloads field from URLs listed in a file, does that count? Shouldn’t there be a separate requirement for third-party source code (not binary) components at higher levels? E.g., JavaScript & Python? I strongly encourage people to use package managers, but it’s not clear that demanding them for all binary components is practical, especially for embedded systems like IoT systems.\r\n\r\nV1.3 & V1.4: V1.3 says “Inventory”, V1.4 says “bill of materials”. As noted earlier, I see no serious difference, say “bill of materials” in both places or explain why they’re different somewhere. These need clear definitions.\r\n\r\nV1.3: “An accurate inventory of all third-party components is available in a machine-readable format” - again, is this transitive? Most proprietary libraries will NOT allow their recipients to find out what’s transitively included. I think it’s a great goal, but I think a minority of current projects could manage this today; I have concerns about its practicality unless you’re only applying this to green field development.\r\n\r\nV1.5: “Software bill-of-materials are required for new procurements” - does this mean GENERATING an SBOM is required, or that software for use must come with an SBOM? If it’s the latter, today that’s often impractical, especially for any proprietary software. 
Also, once again, this is the “inventory” section but it’s asking about SBOMs… which again leads me to believe there’s no real difference.\r\n\r\n\r\nV1,10 “Point of origin is known for all components” - what does this mean? I record the https URL? I have the passport numbers for all the authors? I have no idea how to verify this, or really even what it means.\r\n\r\n\r\nIn chapter V2:\r\n\r\nV2.1: “A structured, machine readable software bill-of-materials (SBOM) format is present” - this is yet another requirement that needs to be reworded to be clear. You mean that I have to find a random SBOM off the street & put it in my directory? Is this the SBOM for the software I’m developing? Are these SBOMs for the software I’m ingesting? How deep do they need to go - can it be just direct dependencies?\r\nV2.3: “Each SBOM has a unique identifier” - why? I presume what’s meant is that there be a way in the SBOM to indicate exactly what version(s) of software it applies to. Otherwise, every time I regenerate an SBOM I would have to create a unique ID, which would make reproducible builds impossible (and that would be terrible).\r\nV2.4: “SBOM has been signed by publisher, supplier, or certifying authority.” - Absolutely not. An SBOM should be signed this way when its corresponding software is RELEASED, but the requirement doesn’t clearly say that. The same problem happens with many of the rest of the requirements, there’s a failure to indicate WHEN things need to happen with the SBOM.\r\n2.9 “SBOM contains a complete and accurate inventory of all components the SBOM describes” - proprietary vendors will not permit that; at best they’ll let you refer to direct dependencies, not indirect ones. This whole document needs to clarify direct vs. transitive dependencies, when are which required?\r\n\r\nIn chapter V3:\r\n\r\nV3.1: “Application uses a repeatable build” - I presume this simply means that if you repeat a build with unchanged inputs, you get the same (bit-for-bit) result, and that makes sense for level 1. That needs to be clarified, because that’s not the same as a reproducible build. However, at higher levels (say level 3) I would expect another additional criterion that specifically require reproducible builds (not just repeatable builds): “Application uses an independently-verified reproducible build” - that is, someone *else* can take the inputs and produce the same (bit-for-bit) result.\r\n\r\n3.1 - does this include ensuring that machine learning results are repeatable given the same data sets? Often training results are considered “data” - yet that data affects execution. It probably should include it.\r\n\r\n3.2 - “Documentation exists on how the application is built and instructions for repeating the build”. I understand you want to make automation optional, but this goes too far. If you have to follow instructions, instead of initiating a build command, you are basically guaranteeing disaster over time. People almost never read instructions, at best they copy & paste a command. Change this to something like: “Documentation exists on how the application is built and there are instructions on how to re-execute the automated system for (re)building the system”.\r\n\r\n3.4 “Application build pipeline prohibits alteration of build outside of the job performing the build” - this is very unclear. What is meant by this? This needs clarification. 
Same for most of the rest of this section.\r\n\r\n3.10 Application build pipeline enforces authentication and defaults to deny & 3.11 Application build pipeline enforces authorization and defaults to deny. - What is meant here? By what?\r\nMost of the requirements in 3 presume that the application build pipeline survives a build. Yet in many cases build pipelines run in containers or temporary VMs, so these questions make no sense.\r\n\r\n3.15 “Application build pipeline has required maintenance cadence where the entire stack is updated, patched, and re-certified for use” - what is a “required maintenance cadence”? Why do I want one? How would I know when I have it? This is way too vague.\r\n\r\n3.17 “All build-time manipulations to source or binaries are known and well defined”. Clarify that this should include compiler optimizations from past executions (e.g., branch-probabilities and JIT warm-ups).\r\n\r\n3.18 “Checksums of all first-party and third-party components are documented for every build” - this should be recorded - not documented (nobody looks at the documentation) & automatically checked later. If this checking is not automated it will generally not happen. It says “checksums”; I think that should be “cryptographic hashes” at least, since a “checksum” need not be a cryptographic hash.\r\n\r\n3.19 - again, “checksum” should be “cryptographic hash”.\r\n\r\n3.21 - “Unused direct and transitive components have been removed from the application”. Agree for direct components, but for transitive components this needs more nuance. Often it’s hard to tell if something is “unused.” In addition, removing an indirect component carries risks that can cause other failures.\r\n\r\nIn chapter V4:\r\n\r\nThe term “package manager” is unclear. The glossary definition (“Package manager - A distribution mechanism that makes software artifacts discoverable by requesters.”) doesn’t really help. As defined, the world wide web plus Google is a package manager.\r\n\r\n4.1 “Binary components are retrieved from a package repository” - I applaud the goal, but this is probably unrealistic unless “World Wide Web” counts as a package repository. If a proprietary vendor sells a binary component, they often won’t use traditional package manager interfaces; what is expected here? Not all OSS is in a package manager’s repo. Are you expecting developers to stop using these components? That seems impractical today; what are the options when that is not possible? What is the actual problem this is trying to solve, so that we can identify appropriate workarounds?\r\n\r\nV4.2: “Package repository contents are congruent to an authoritative point of origin for open source components” - what does “congruent” mean? Reword or define, this is unclear. I have no idea if this is good or not.\r\nV4.3: “Package repository requires strong authentication” - for what? To read? Probably not; most such package repositories allow anyone to read. I presume what’s meant is to modify (create, update, delete), but that’s not stated. Also: Many package repos won’t require this, and OWASP can’t make them. Instead, you could require that the packages YOU DOWNLOAD have strong authentication for modification; that’s far more practical.\r\n4.11 - “Package repository provides auditability when components are updated” - it sounds good, but what does “auditability” mean in this context?\r\n4.13/4.14 - what can projects do if their package manager doesn’t support these capabilities? 
I’m guessing that they could augment their package manager with these functions and call the combination their “package manager” - yes? I don’t think all package managers support these functions, so you need to discuss how to handle these cases.\r\n4.16 “Package manager validates TLS certificate chain to repository and fails securely when validation fails” - many organizations use HTTPS (TLS) intercepting proxies (and re-sign with their own certs). I am not a fan of this approach, but it’s widespread, and if you forbid it, this is a no-go. This requirement needs to be rewritten or clarified so that it can work in such settings, or acknowledge that a large number of organizations will not be able to use this specification. E.g., change “to repository” into “to repository or that organization’s authorized proxy”.\r\nV4.18 “Package manager does not execute code” - I presume what’s meant is that it doesn’t execute code merely when it downloads the code (it should say that instead). I applaud the sentiment, but I think that’s probably a non-starter today. I believe many package managers still can execute code when packages are downloaded. At the language repo level, at least JavaScript/NPM, Python, and Ruby support execution (Source: “Typosquatting programming language package managers” by Nikolai Tschacher, 2016, https://incolumitas.com/2016/06/08/typosquatting-package-managers/ ). The RPM package system (used by Red Hat Enterprise Linux, CentOS, Fedora, and SuSE) includes %pre and %post sections that execute scripts. OWASP won’t be able to change whether or not packages execute programs on installation for a long time, if ever, because of backwards compatibility issues. People will just ignore impractical advice like “don’t use the normal package manager for your situation” - and ignore OWASP if it tries to enforce this. Besides, other requirements in this document require a package manager. OWASP could recommend a flag or something, or more pragmatically, require builds in a safe sandbox (such as a container or VM) that restricts the potential impact of executing code.\r\n\r\nV4: Should add “Anti-typosquatting measures are established when using public package repos by the project or repo” for level 3. Typosquatting is a big problem. Not all public package repos counter typosquatting; projects should have measures in place where the public repo does not.\r\n\r\nChapter V5:\r\n\r\nV5.1 “Component can be analyzed with linters and/or static analysis tools” - that’s pointless, anything CAN be. Do you mean that the source code is available, so that *source* code analysis can be used? Do you mean that the source code is not obfuscated?\r\n\r\n5.2 “Component is analyzed using linters and/or static analysis tools prior to use” and 5.3\r\n“Linting and/or static analysis is performed with every upgrade of a component” - why bother? I can run some tools & throw away the results. Unless you require someone to do something (e.g., analyze those results to determine if there’s an unusual/unacceptable level of risk), this is a complete waste of time.\r\n\r\n5.4: “An automated process of identifying all publicly disclosed vulnerabilities in third-party and open source components is used” - this should just be “components” - first-party gets no free ride. This also seems to be a one-time thing; continuous monitoring is important too, as reports can happen later.\r\n\r\nI don’t see any requirement to *do* anything when a publicly disclosed vulnerability is found; without that, why bother? 
There should be a triage process so that less-important and unexploitable vulnerabilities are deferred (ideally you fix everything instantly, but that’s impractical & rushing everything may increase the likelihood of making even worse mistakes). However, important exploitable vulnerabilities caused by a component DO need to be addressed rapidly.\r\n\r\nThis section doesn’t begin to seriously require security analysis - fuzzing, assurance cases, red teams, secure requirements / design / implementation, training of developers, and so on. That’s fine if the real focus is supply chain analysis, as noted earlier.\r\n\r\nChapter V6:\r\n\r\nThe terms “pedigree” and “provenance” are not defined well enough for anyone to agree on what counts and what doesn’t count.\r\n\r\n6.1 “Provenance of modified components is known and documented” - how far? E.g., I download a JavaScript package named X via NPM. Is that enough? After all, “it came from NPM”. If not, what IS enough?\r\n\r\n6.2 - “Pedigree of component modification is documented and verifiable”. If I record “Fred modified it” & sign it, is that enough? It appears that the answer is “yes”; if that wasn’t intended, then what was intended needs to be defined. The glossary definition of Pedigree is “Data which describes the lineage and/or process for which software has been created or altered.” - and that’s too vague to be useful.\r\n\r\nChapter “Guidance: Open Source Policy”\r\n\r\nAgain, this isn’t a taxonomy.\r\n\r\nSince this isn’t a requirement, should this be in the document at all? This should probably be in a separate document.\r\n\r\n“All organizations that use open source software should have an open source policy” - this is a joke, right? Who uses software & doesn’t use OSS? Even proprietary software practically always has OSS embedded within it.\r\n\r\nA policy that tries to declare “How many major or minor revisions old are acceptable” is probably impractical. There are too many variations in applications and components for that to make much sense.\r\n\r\n\r\nChapter Appendix A: Glossary\r\n\r\nThese definitions are too high-level. E.g., “Package manager” is “A distribution mechanism that makes software artifacts discoverable by requesters.” - so I guess the world wide web + Google is a package manager.\r\n\r\nChapter Appendix B: References:\r\nI’m surprised that NIST SP 800-161 (“Supply Chain Risk Management Practices for Federal Information Systems and Organizations”) wasn’t referenced. Have you examined it?\r\n\r\nI didn’t see the “Open Trusted Technology Provider (O-TTPS)” standard referenced, “Open Trusted Technology Provider™ Standard – Mitigating Maliciously Tainted and Counterfeit Products (O-TTPS)”. It was developed by the Open Group, and is now ISO/IEC 20243:2015. That focuses more on “best practices for global supply chain security and the integrity of commercial off-the-shelf (COTS) information and communication technology (ICT) products” - but it should probably be mentioned & reviewed.\r\n\r\nI suspect this should refer to OpenChain, and again, review it. https://wiki.linuxfoundation.org/_media/openchain/openchainspec-2.0.pdf\r\n\r\nConsider adding the CII Best Practices badge in the references, and reviewing it. See: https://github.com/coreinfrastructure/best-practices-badge/blob/master/doc/criteria.md and https://github.com/coreinfrastructure/best-practices-badge/blob/master/doc/other.md\r\n\r\nThese comments are my own, but I hope they help. 
Good luck!", + "summary": "The issue raises extensive concerns regarding the OWASP \"Software Component Verification Standard\" Version 1.0.0-RC.1. Key points include:\n\n1. **Vagueness of Requirements**: Many requirements are deemed too vague, making it difficult to determine how to meet them. A recommendation is made to rewrite the requirements to ensure clarity and prevent misinterpretation by parties with potentially malicious intentions.\n\n2. **Practicality**: There are doubts about the practicality of some requirements, especially for open source software projects. There is a suggestion to consider if these requirements can realistically be implemented.\n\n3. **Scope of Application**: The document may need to clarify whether it applies to entire organizations or specific projects, suggesting that focusing on individual projects may be more practical.\n\n4. **Threat Model and Justification**: The lack of a defined threat model raises concerns about whether the controls are adequate or necessary. Clear connections between requirements and potential threats should be established.\n\n5. **Misleading Title**: The title of the document is considered misleading, with a suggestion to rename it to accurately reflect its focus on software supply chain integrity rather than verification.\n\n6. **Specific Comments**: Numerous sections of the document are critiqued for lack of clarity, inadequate definitions, and impractical requirements, particularly regarding inventory, software bill of materials (SBOM), and package management. \n\nTo address these issues, a thorough review and revision of the requirements for clarity, practicality, and applicability are necessary. Additionally, the document's scope, purpose, and terminology should be clearly defined to ensure users can effectively implement its guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/21", @@ -17257,6 +17842,7 @@ "node_id": "MDU6SXNzdWU2NDU2MTg1MjY=", "title": "Control Mapping", "body": "Possible improvements to the spec would be to map SCVC controls to existing control documents including:\r\n* NIST 800-53\r\n* NIST 800-171\r\n* CMMC\r\n* OWASP ASVS\r\n* OWASP SAMM\r\n* BSIMM", + "summary": "The issue suggests that there are potential enhancements to the control mapping specifications by aligning SCVC controls with established control frameworks such as NIST 800-53, NIST 800-171, CMMC, OWASP ASVS, OWASP SAMM, and BSIMM. To address this, a review and mapping of SCVC controls to these existing documents should be conducted.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/28", @@ -17285,6 +17871,7 @@ "node_id": "MDU6SXNzdWU5Nzg5MjExNzM=", "title": "lvl 2 and lvl 3 is impossible due to requiring both reproducability and non-reproducability of SBOMs", "body": "`2.2 SBOM creation is automated and reproducible` means the SBOM must be reproducible, a good requirement for lvl2 and lvl3.\r\n`2.7 SBOM is timestamped` requires a timestamp for every level. \r\n\r\nTimestamps are the bane of reproducibility.\r\n\r\n> Timestamps make the biggest source of reproducibility issues. Many build tools record the current date and time. The filesystem does, and most archive formats will happily record modification times on top of their own timestamps. 
It is also customary to record the date of the build in the software itself…\r\n> \r\n> **Timestamps are best avoided**\r\n\r\nhttps://reproducible-builds.org/docs/timestamps/\r\n\r\nI can understand the desire for a timestamp but if it's included there needs to be details around the idea of `SOURCE_DATE_EPOCH` (a timestamp based on the last modification to any of the source or some other fixed timestamp). It needs to be clearly explained that this is allowed for the timestamp and is in fact required once you require reproducibility for lvl2+.\r\n\r\nhttps://reproducible-builds.org/docs/source-date-epoch/\r\n\r\nTangentially related to #9 request for more explanation", + "summary": "The issue discusses the conflicting requirements for achieving levels 2 and 3 in SBOM (Software Bill of Materials) creation, specifically addressing the challenges posed by the need for both reproducibility and non-reproducibility due to timestamp requirements. It highlights that while automated and reproducible SBOM creation is essential, timestamps can undermine reproducibility by introducing variability in builds. The discussion emphasizes the need for an alternative approach, such as using `SOURCE_DATE_EPOCH`, to maintain reproducibility while still incorporating timestamps. Additional clarity around this alternative is suggested, as well as a connection to another related issue. \n\nTo address this, a proposal could be made to clarify the use of `SOURCE_DATE_EPOCH` in the guidelines for SBOM timestamping.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/31", @@ -17311,6 +17898,7 @@ "node_id": "I_kwDODDeA9s5N5KKD", "title": "Translate to Japanese", "body": "I want to translate this standards to Japanese. \r\nHow can I contribute ?\r\n\r\nI just translate markdowns in \"en/\" directories?\r\n\r\nhttps://github.com/OWASP/Software-Component-Verification-Standard/tree/master/en", + "summary": "The issue discusses the desire to translate standards into Japanese and seeks guidance on how to contribute. It questions whether the translation should focus solely on markdown files located in the \"en/\" directories. To assist, it may be helpful to clarify the contribution process and confirm the specific files that need translation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/33", @@ -17337,6 +17925,7 @@ "node_id": "I_kwDODDeA9s5OTspK", "title": "Create BOM Maturity Model Taxonomy", "body": "The mindmap of the taxonomy currently in development is located:\r\nhttps://drive.google.com/file/d/1Uot5Ntm0NB3kJgHAc7fDtZTleJIhZS2P/view?usp=sharing\r\n\r\nUse [XMind](https://www.xmind.net/) to view.\r\n\r\nA preview of the taxonomy is here (may not always be update to date):\r\nhttps://drive.google.com/file/d/1_GIylG4K3mT_TPeGJlIUj4HtouNRgtPQ/view?usp=sharing\r\n\r\n\r\n", + "summary": "The issue discusses the creation of a BOM (Bill of Materials) Maturity Model Taxonomy. A mind map for the taxonomy is currently under development and can be accessed via a Google Drive link. Users are advised to use XMind to view the mind map. Additionally, there is a preview link provided that may not always reflect the latest updates. 
It seems necessary to further develop or refine the taxonomy based on the existing resources.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/34", @@ -17367,6 +17956,7 @@ "node_id": "I_kwDODDeA9s5OTs4-", "title": "Develop BOM Maturity Model - maturity levels and assignments", "body": "Depends on: #34", + "summary": "The issue discusses the need to develop a Bill of Materials (BOM) Maturity Model, which includes defining various maturity levels and their corresponding assignments. It is dependent on another issue, identified as #34. To address this, one should first review the details of issue #34 and then proceed to outline the maturity levels for the BOM model.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/35", @@ -17397,6 +17987,7 @@ "node_id": "I_kwDODDeA9s5OTtAa", "title": "Create BOM Maturity Model in JSON", "body": "Depends on: #34 and #35", + "summary": "The issue requests the creation of a Bill of Materials (BOM) Maturity Model in JSON format. It also indicates that this task is dependent on the completion of issues #34 and #35. Therefore, it would be necessary to address those dependencies before proceeding with the BOM Maturity Model.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/36", @@ -17427,6 +18018,7 @@ "node_id": "I_kwDODDeA9s5W7auq", "title": "PDF links", "body": "Re: https://github.com/OWASP/Software-Component-Verification-Standard/releases\r\n\r\nIn the PDF, all the links in Appendix B: References do not appear to be hyperlinks.\r\n", + "summary": "The issue reports that the links in Appendix B: References of the PDF are not functioning as hyperlinks. To resolve this, the links should be made clickable in the PDF document.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/37", @@ -17451,10 +18043,11 @@ "pk": 609, "fields": { "nest_created_at": "2024-09-11T20:45:03.351Z", - "nest_updated_at": "2024-09-12T02:41:10.855Z", + "nest_updated_at": "2024-09-13T16:25:21.266Z", "node_id": "I_kwDODDeA9s5nCeMu", "title": "Translate standard documentation into Vietnamese language", "body": "Software Supply Chain Security become a critical approach for many security programs. Vietnam is also a country that adopts new standards, and processes to enhance their chance in Software Supply Chain. Having a Vietnamese version of the standard may help easier to understand to onboard the standard for those users who are not security professional", + "summary": "The issue discusses the need for translating standard documentation related to Software Supply Chain Security into Vietnamese. This translation is essential for enhancing understanding among users in Vietnam who may not be security professionals. 
To address this, the translation work should be initiated to make the standards more accessible.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Software-Component-Verification-Standard/issues/38", @@ -17481,6 +18074,7 @@ "node_id": "MDU6SXNzdWU0NzE0MDIyMDM=", "title": "Mobile Operating System Threats", "body": "Compile a list of all Mobile Operating System Threats", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Mobile-Threatmodel/issues/1", @@ -17510,6 +18104,7 @@ "node_id": "MDU6SXNzdWU0NzE0MDk0ODU=", "title": "Threat Template", "body": "Template to be used for writing up threats ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Mobile-Threatmodel/issues/2", @@ -17534,10 +18129,11 @@ "pk": 612, "fields": { "nest_created_at": "2024-09-11T20:45:49.447Z", - "nest_updated_at": "2024-09-11T20:45:49.447Z", + "nest_updated_at": "2024-09-13T16:09:17.627Z", "node_id": "MDU6SXNzdWU0NzM2MjAzMzE=", "title": "Complete GitHub Checklist ", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Mobile-Threatmodel/issues/4", @@ -17566,6 +18162,7 @@ "node_id": "MDU6SXNzdWU1OTAzOTg5Mzg=", "title": "Copper: Webhook from Stripe to update Membership info on People", "body": "Utilize copper api for updating People with current membership information", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-staff/issues/9", @@ -17594,6 +18191,7 @@ "node_id": "MDU6SXNzdWU1OTQ0Njg2MjM=", "title": "Discussion on Topic x", "body": "asfasfdj;sdlfkads fasd;fljsd;fjasj", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-staff/issues/10", @@ -17620,6 +18218,7 @@ "node_id": "MDU6SXNzdWU3MDc1MDY1OTc=", "title": "Regional Event in a box", "body": "Created by Andrew van der Stock via monday.com integration. 🎉", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-staff/issues/11", @@ -17646,6 +18245,7 @@ "node_id": "MDU6SXNzdWU3MDc1MDY3MDQ=", "title": "Trainer splits", "body": "Created by Andrew van der Stock via monday.com integration. 🎉", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-staff/issues/12", @@ -17672,6 +18272,7 @@ "node_id": "MDU6SXNzdWU3MDc1MDY3OTE=", "title": "Hack-a-thon", "body": "Created by Andrew van der Stock via monday.com integration. 🎉", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-staff/issues/13", @@ -17694,10 +18295,11 @@ "pk": 618, "fields": { "nest_created_at": "2024-09-11T20:45:59.179Z", - "nest_updated_at": "2024-09-11T20:45:59.179Z", + "nest_updated_at": "2024-09-13T16:09:21.728Z", "node_id": "I_kwDOC7ie5M5ae9Ca", "title": "Missing README.md", "body": "hello there this branch is missing Readme.md file.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-staff/issues/15", @@ -17724,6 +18326,7 @@ "node_id": "I_kwDOC7ie5M5a7HXp", "title": "Marking of pages", "body": "Please mark (or tag, or make it in another way visible) staff pages as only applicable to staff. \r\n\r\nE.g. the \"Getting a Current Member List\" page, refers to Copper CRM. 
Logging on as an \"ordinary\" member does not bring me to the OWASP CRM, but one that I create by joining.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www-staff/issues/16", @@ -17750,6 +18353,7 @@ "node_id": "MDU6SXNzdWU1MjU5MDk3MzY=", "title": "An adequate Replacement for the Central Mediawiki File Management is needed", "body": "In my opinion we do need a good replacement for the mediawiki file management.\r\nThere is a need to provide OWASP-wide File management to avoid locally managed duplicates and a mass of unknown versions from the same document.\r\nLinks to internal OWASP-documents should look dirrerent from external links. \r\nPlease deploy a solution and give us guidelines how to use it.\r\n\r\nI am sorry to say, but in my opinion this is a critical factor for any migration.\r\nCheers\r\nTorsten", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/21", @@ -17780,6 +18384,7 @@ "node_id": "I_kwDOC5rsi85eRwcl", "title": "Other languages", "body": "The page is only in English, but, shouldn't be a version in Spanish or other languages?\n\nThis could make the project more visible to the world and be able to reach more people.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/213", @@ -17806,6 +18411,7 @@ "node_id": "I_kwDOC5rsi85kf_Pm", "title": "There are some issues on starting the owasp virtual machine", "body": "# There are some issues on starting the owasp virtual machine with the lastest version of virtualbox, and I didn`t get the solution of it!\r\n# version of virtualbox: 7.0.6\r\n# version of owasp: 1.2\r\n# Could you please give me some of the solutions on starting up the virtual machine? Thanks a lot! 
", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/235", @@ -17832,6 +18438,7 @@ "node_id": "I_kwDOC5rsi853m96u", "title": "ddf", "body": "vpn", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/288", @@ -17858,6 +18465,7 @@ "node_id": "I_kwDOC5rsi858uHNU", "title": "OWASP", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/296", @@ -17884,6 +18492,7 @@ "node_id": "I_kwDOC5rsi858uzh1", "title": "verification of employment agreement ", "body": "HTTP/1.1 200 OK\n\nAccept-Ranges: bytes\n\nCache-Control: max-age=0, private, must-revalidate\n\nContent-Security-Policy: default-src 'none'; base-uri 'self'; child-src github.com/assets-cdn/worker/ gist.github.com/assets-cdn/worker/; connect-src 'self' uploads.github.com www.githubstatus.com collector.github.com raw.githubusercontent.com api.github.com github-cloud.s3.amazonaws.com github-production-repository-file-5c1aeb.s3.amazonaws.com github-production-upload-manifest-file-7fdce7.s3.amazonaws.com github-production-user-asset-6210df.s3.amazonaws.com cdn.optimizely.com logx.optimizely.com/v1/events api.githubcopilot.com objects-origin.githubusercontent.com *.actions.githubusercontent.com wss://*.actions.githubusercontent.com productionresultssa0.blob.core.windows.net/ productionresultssa1.blob.core.windows.net/ productionresultssa2.blob.core.windows.net/ productionresultssa3.blob.core.windows.net/ productionresultssa4.blob.core.windows.net/ productionresultssa5.blob.core.windows.net/ productionresultssa6.blob.core.windows.net/ productionresultssa7.blob.core.windows.net/ productionresultssa8.blob.core.windows.net/ productionresultssa9.blob.core.windows.net/ github-production-repository-image-32fea6.s3.amazonaws.com github-production-release-asset-2e65be.s3.amazonaws.com insights.github.com wss://alive.github.com; font-src github.githubassets.com; form-action 'self' github.com gist.github.com objects-origin.githubusercontent.com; frame-ancestors 'none'; frame-src viewscreen.githubusercontent.com notebooks.githubusercontent.com octocaptcha.com; img-src 'self' data: github.githubassets.com media.githubusercontent.com camo.githubusercontent.com identicons.github.com avatars.githubusercontent.com github-cloud.s3.amazonaws.com objects.githubusercontent.com secured-user-images.githubusercontent.com/ user-images.githubusercontent.com/ private-user-images.githubusercontent.com opengraph.githubassets.com github-production-user-asset-6210df.s3.amazonaws.com customer-stories-feed.github.com spotlights-feed.github.com objects-origin.githubusercontent.com *.githubusercontent.com; manifest-src 'self'; media-src github.com user-images.githubusercontent.com/ secured-user-images.githubusercontent.com/ private-user-images.githubusercontent.com github-production-user-asset-6210df.s3.amazonaws.com gist.github.com; script-src github.githubassets.com; style-src 'unsafe-inline' github.githubassets.com; upgrade-insecure-requests; worker-src github.com/assets-cdn/worker/ gist.github.com/assets-cdn/worker/\n\nContent-Type: text/html; charset=utf-8\n\nDate: Sun, 21 Jan 2024 15:52:06 GMT\n\nETag: W/\"f8454f92b2a4aa386c6c70ba970e7239\"\n\nReferrer-Policy: origin-when-cross-origin, strict-origin-when-cross-origin\n\nServer: GitHub.com\n\nSet-Cookie: 
\n \n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/297", @@ -17910,6 +18519,7 @@ "node_id": "I_kwDOC5rsi858vun1", "title": "error message not found ", "body": "https://github.com/microsoft/vscode/issues/202966", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/298", @@ -17936,6 +18546,7 @@ "node_id": "I_kwDOC5rsi86LHHRP", "title": "The number of URLs is increasing for 'CSP: script-src unsafe-inline' and 'CSP: style-src unsafe-inline' after fixing 'CSP: Wildcard Directive'", "body": "Hello.\r\n1) I has the next report:\r\n![image](https://github.com/OWASP/owasp.github.io/assets/28634785/75dd9c7f-655f-4238-aac7-a09b39be48d5)\r\nThe value of CSP was \r\n**\"default-src 'self'; script-src 'self' cdn.jsdelivr.net 'unsafe-inline'; img-src 'self' validator.swagger.io bootswatch.com getbootstrap.com data:; style-src 'self' cdn.jsdelivr.net 'unsafe-inline'; font-src 'self' cdn.jsdelivr.net data:; connect-src 'self' bootswatch.com;\"**\r\n2) I fixed CSP: Wildcard Directive by adding **form-action 'self'; frame-ancestors 'self'** and received the next report:\r\n![image](https://github.com/zaproxy/zaproxy/assets/28634785/c3c02798-52a3-448b-9645-77f0f4cde9d2)\r\n3) My question is why the number of URLs in **CSP: script-src unsafe-inline** and **CSP: style-src unsafe-inline** was increased? ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/313", @@ -17958,10 +18569,11 @@ "pk": 628, "fields": { "nest_created_at": "2024-09-11T20:46:12.507Z", - "nest_updated_at": "2024-09-11T20:46:12.507Z", + "nest_updated_at": "2024-09-13T04:18:07.591Z", "node_id": "I_kwDOC5rsi86LdgsJ", "title": "@yaseenaljamal", "body": "https://www.apache.org/licenses/LICENSE-2.0.txt", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp.github.io/issues/317", @@ -17988,6 +18600,7 @@ "node_id": "MDU6SXNzdWU1MzgwNTk3MzA=", "title": "Subdirs", "body": "\r\n.. are not working or am I missing something?\r\n\r\nFor our German Chapter we have links like https://www.owasp.org/index.php/OWASP_German_Chapter_Stammtisch_Initiative/Hamburg and https://www.owasp.org/index.php/Germany/Chapter_Meetings (cc @bkimminich).\r\n\r\nThx, Dirk\r\n\r\n\r\n\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www--site-theme/issues/32", @@ -18014,6 +18627,7 @@ "node_id": "MDU6SXNzdWU1NzAwMjE0NTE=", "title": "Twitter cards social preview content needed", "body": "It looks like the site inserts OpenGraph metatags (for Facebook etc), however it doesn't insert the tags necessary for Twitter which is used quite extensively as a sharing platform in the industry. 
It would be nice if the necessary tags for that were populated as well.\r\n\r\nRefs:\r\n- https://developer.twitter.com/en/docs/tweets/optimize-with-cards/overview/abouts-cards\r\n- https://brianbunke.com/blog/2017/09/06/twitter-cards-on-jekyll/\r\n\r\nEdit: It seems twitter will fall back to OpenGraph.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www--site-theme/issues/52", @@ -18040,6 +18654,7 @@ "node_id": "MDU6SXNzdWU2NTI5OTczNDU=", "title": "Cookie banner not compliant with EU law", "body": "I'm not a lawyer, but I think we might be making fools of ourselves with this cookie banner (see screenshot) that doesn't even meet current EU legislation demanding an \"opt in\" to all tracking and non-essential cookies and not accepting plain \"Accept\"-banners any longer...\n\n https://edpb.europa.eu/sites/edpb/files/files/file1/edpb_guidelines_202005_consent_en.pdf\n\n![](https://i.imgur.com/E0gDeUr.png)\n\nI ran an automated conformity test, and the `_ga` and `_gid` cookies (Google Analytics) need to be locked until explicitly accepted by the user in an opt-in fashion. The website I used marked the other cookies from CloudFlare and Stripe as essential and therefore compliant.\n\nReport can be found in the corresponding Slack discussion: https://owasp.slack.com/files/U1S23SNE7/F016556FB61/report-owasporg-4183554.pdf\n\n_Sent from my Pixel 3 XL using [FastHub](https://play.google.com/store/apps/details?id=com.fastaccess.github)_", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www--site-theme/issues/65", @@ -18066,6 +18681,7 @@ "node_id": "MDU6SXNzdWU4NDIxNjY5NTk=", "title": "OWASP/www-chapter-colombo: Page build failure", "body": "After updating Colombo chapter page repo [1], I noticed there was a page build failure. \r\nI highly appreciate if you can help me to fix this.\r\n\r\n`The page build failed for the `master` branch with the following error:\r\n\r\nA file was included in `/_layouts/col-sidebar.html` that is a symlink or does not exist in your `_includes` directory. 
For more information, see https://docs.github.com/github/working-with-github-pages/troubleshooting-jekyll-build-errors-for-github-pages-sites#file-is-a-symlink.\r\n\r\nFor information on troubleshooting Jekyll see:\r\n\r\n https://docs.github.com/articles/troubleshooting-jekyll-builds\r\n\r\nIf you have any questions you can submit a request at https://support.github.com/contact?repo_id=221790172&page_build_id=242477598\r\n\r\n`\r\n[1] https://github.com/OWASP/www-chapter-colombo\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www--site-theme/issues/79", @@ -18088,10 +18704,11 @@ "pk": 633, "fields": { "nest_created_at": "2024-09-11T20:46:20.590Z", - "nest_updated_at": "2024-09-11T20:46:20.590Z", + "nest_updated_at": "2024-09-13T16:09:29.696Z", "node_id": "I_kwDOC5rfps6FvH03", "title": "Improve the OWASP UI", "body": "The overall UI for the entire OWASP site looks and feels inconsistent, which can cause new visitors to quickly lose their trust in OWASP itself, which doesn't give a good impression, especially for an organisation that focuses on cybersecurity.\r\n\r\nI'm aware that OWASP use a customised theme for the site, but I believe that they can benefit from having their website redesigned from the ground up, so that it looks more consistent, trustworthy, and professional, not just for newcomers, but also for returning visitors and paying members.\r\n\r\nFurther, isn't one of OWASP's Top 10 items Insecure Design?\r\n\r\nNot only does that relate to how cybersecurity solutions themselves are designed (so that they can be secure), but also how those solutions (specifically user interfaces on an app, websites, etc) look visually, since in this case, you pretty much have to judge a book by its cover to determine whether it is secure and trustworthy by design (both in terms of security and aesthetics), since poorly designed websites and badly formatted emails can be viewed as a cybersecurity threat, and in most cases, that's how they're often perceived.\r\n\r\nSource: I am currently a UI Designer for a cybersecurity startup, so improving the UI for OWASP is something that I am fully able to do.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/www--site-theme/issues/113", @@ -18114,10 +18731,11 @@ "pk": 634, "fields": { "nest_created_at": "2024-09-11T21:07:22.730Z", - "nest_updated_at": "2024-09-11T21:07:22.730Z", + "nest_updated_at": "2024-09-13T16:09:36.725Z", "node_id": "I_kwDOC5InlM5bC3YM", "title": "Head and Body tag is missing in 'index.html' file.", "body": "Head and Body tag is missing in 'index.html' file. This may cause problem in live reloading as well in SEO.", + "summary": "", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/video-portal/issues/7", @@ -18144,6 +18762,7 @@ "node_id": "MDU6SXNzdWU0OTQ0OTQ3MDg=", "title": "The Output of the script is showing jumbled characters", "body": "Hello team, \r\n\r\n Nice work with this static tool. I love it. \r\n\r\nThis issue might not be of code and more of my linux (ubuntu) setup. Please find attached image showing this. 
\r\n\r\nRequest your inputs on how to make it look like a neat table like in your screenshot..\r\n\r\n![jumbled](https://user-images.githubusercontent.com/55435569/65027406-e6daec80-d957-11e9-8562-3f67630516b7.PNG)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wpBullet/issues/1", @@ -18166,10 +18785,11 @@ "pk": 636, "fields": { "nest_created_at": "2024-09-11T21:07:32.198Z", - "nest_updated_at": "2024-09-11T21:07:32.198Z", + "nest_updated_at": "2024-09-13T16:09:43.429Z", "node_id": "MDU6SXNzdWU0OTQ0OTY4NzE=", "title": "Installation of PIP doesn't work", "body": "Hi team, \r\n\r\n This might be obvious to few people, but i wanted to put it out here anyway for future reference. \r\n\r\nI tried installing pip-python, but it installs an older version by default and doesn't install the requirements as well. \r\n\r\nWhat worked is when i installed pip3/python3 as follows in UBUNTU:\r\n\r\n apt install python3\r\n apt install python3-pip\r\n pip3 install -r requirements.txt\r\n python3 wpbullet.py -path=\"\"", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wpBullet/issues/2", @@ -18192,10 +18812,11 @@ "pk": 637, "fields": { "nest_created_at": "2024-09-11T21:08:24.538Z", - "nest_updated_at": "2024-09-11T21:08:24.538Z", + "nest_updated_at": "2024-09-13T04:19:07.338Z", "node_id": "MDU6SXNzdWU0NTIxNjQ3MTA=", "title": "CheatSheetSeries.Java.pdf not clear about the fact it's java", "body": "The list of cheatsheets at the bottom, are not presented as Java related. All we have is a Java logo on the top left, and that's it. We should say how to do it.\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/community-docs/issues/1", @@ -18224,6 +18845,7 @@ "node_id": "MDU6SXNzdWU0MzE2NDAwNjg=", "title": "Better define MFA support", "body": "Take NPM as an example - there is an option to configure MFA, and you can enforce it per package (see the [docs](https://docs.npmjs.com/about-two-factor-authentication)). But once MFA is enabled, you cannot use a CI to publish your packages - publish is done automatically. I think this should be reflected somehow, WDYT?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/2", @@ -18250,6 +18872,7 @@ "node_id": "MDU6SXNzdWU0MzY0MTE1NTg=", "title": "Add WAPM", "body": "Web Assembly Package Manager was announced today, seems like a valid modern package manager to add to the list.\r\n\r\nhttps://medium.com/wasmer/announcing-wapm-the-webassembly-package-manager-18d52fae0eea", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/12", @@ -18276,6 +18899,7 @@ "node_id": "MDU6SXNzdWU0MzkwMzkwNjg=", "title": "Review proposed Golang ecosystem changes", "body": "https://go.googlesource.com/proposal/+/master/design/25530-sumdb.md", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/13", @@ -18304,6 +18928,7 @@ "node_id": "MDU6SXNzdWU1NjQ2NzY3OTk=", "title": "Clarification of support levels for items in Tiers", "body": "Can we add a description of values and their definitions for security criteria items so that it is better understood what each mean. For example, I found the following for [npm.md](./npm.md) unclear:\r\n* Strong authentication: Partial - what does it mean?\r\n* Update notifications - Partials - means what exactly? 
Is it just the single maintainer who published but not all others who are listed as maintainers or the team that manages it?\r\n* Package Manager Does Not Run Code - Optional - If it is optional, how does this score? is it a +1 for flagging as passing the criteria or not?\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/18", @@ -18330,6 +18955,7 @@ "node_id": "MDU6SXNzdWU1NjQ3MjExNjc=", "title": "Suggestion: add new criteria for Package Manager FS operations", "body": "I would like to add a new criteria for package managers which revolves around other operations that they do. We've nailed down running code with `Package Manager Does Not Run Code` and doing call-home stuff with `Package Manager Does Not Collect Info` but package managers also do filesystem operations, such as linking and so I want to add a new item `Package Managers Does FS Linking` (or can think of a better name if you have suggestions).\r\n\r\nThis is based on the recent security vulnerabilities that impacted all three popular JS package managers (npm, yarn and pnpm) due to their filesystem operations when packages with executables defined are installed. Full story for reference: https://snyk.io/blog/understanding-filesystem-takeover-vulnerabilities-in-npm-javascript-package-manager/", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/19", @@ -18356,6 +18982,7 @@ "node_id": "MDU6SXNzdWU1NjQ3MjM0ODA=", "title": "Suggestion: adding lockfile and security related to it", "body": "Due to recent research I published with regards to [lockfile security](https://snyk.io/blog/why-npm-lockfiles-can-be-a-security-blindspot-for-injecting-malicious-modules/) - I'd be happy if we also cover lockfiles as a general use-case, but also the traits of lockfiles, such as whether they are tracked for direct or all dependencies, do they include signatures or checksums and so on.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/20", @@ -18382,6 +19009,7 @@ "node_id": "MDU6SXNzdWU1NzAyNTE5OTk=", "title": "Operating System package Managers", "body": "I think OS packager managers are a major blind spot and I have done extensive research on many of them and their various ranking in this system.\r\n\r\nWould there be interest in me contributing my research on the security of brew, pacman, apt, yum, etc?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/21", @@ -18408,6 +19036,7 @@ "node_id": "MDU6SXNzdWU2MDE5ODg0MTY=", "title": "Update Python for recent and upcoming PyPI changes", "body": "Is this effort still active? I can't see much mention of it or activity around it apart from this repository and the original blog post announcement.\r\n\r\n[PyPI Now Supports Two-Factor Login via WebAuthn](http://pyfound.blogspot.com/2019/06/pypi-now-supports-two-factor-login-via.html) and [API tokens for uploads](http://pyfound.blogspot.com/2020/01/start-using-2fa-and-api-tokens-on-pypi.html), I'd be happy to write up a Packman page for Python/PyPI.\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/22", @@ -18434,6 +19063,7 @@ "node_id": "MDU6SXNzdWU4MTkxMzcxODE=", "title": "Corrections / Clarifications about Swift Package Manager", "body": "Thank you for putting together this resource. 
For the past year, I've been working on a proposal to add a [registry to Swift Package Manager](https://github.com/apple/swift-evolution/blob/main/proposals/0292-package-registry-service.md), and I found the information in this project helpful as I considered how that should work.\r\n\r\nI'm happy to report that the new registry interface promises to substantially improve the security and reliability story for Swift Package Manager (both immediately, and in follow-up proposals building on top of the original). \r\n\r\nIn the meantime, I wanted to contribute to this project by offering corrections or clarifications to some details in the current report:\r\n\r\n* * *\r\n\r\n> ### Security Contacts and Process\r\n>\r\n> To satisfy this requirement, the package manager must have a way to receive security information from the community and a process for handling such feedback. A published email such as security@, together with a mechanism to ensure that the feedback is captured and responded to would satisfy this requirement.\r\n\r\nThe majority of Swift packages (>99.9%) are hosted on GitHub. Some packages define a security policy on GitHub (for example, see [@Flight-School/Money](https://github.com/Flight-School/Money/security/policy)). \r\n\r\nThis is currently listed as **No**. If we follow the same criteria as we do for MFA, should this be updated to **Optional** with a comment? \r\n\r\n* * *\r\n\r\n> ### Code Signing\r\n>\r\n> It should be possible for developers to sign their code. When they do, the package manager should verify the signatures and provide a way for those to be distributed to consumers of the package.\r\n\r\n\r\nSwift Package Manager has built-in support for [code-signed binary artifacts](https://github.com/apple/swift-evolution/blob/master/proposals/0272-swiftpm-binary-dependencies.md#binary-signatures). \r\n\r\nThis is currently listed as **No**. Should this be updated to **Partial** with a comment?\r\n\r\n* * *\r\n\r\n> ### Package Manager Does Not Run Code\r\n>\r\n> The package manager should not run code on package install.\r\n\r\nIn fact, Swift Package Manager _does_ evaluate package manifests. On macOS, it's run through a sandboxed process (`sandbox-exec`) to mitigate the effects of arbitrary code execution (see https://github.com/apple/swift-package-manager/blob/4bef41f12f082d55a4cb979a489379bfcf69f0c7/Sources/PackageLoading/ManifestLoader.swift#L776). \r\n\r\nThis is currently listed as **Yes** (that is, it doesn't run code). Should this entry be updated to **No** or something else?\r\n\r\n* * *\r\n\r\n> ### Integrity verification\r\n> \r\n> Package manager provides a method for verifying the integrity of the downloaded package.\r\n>\r\n> None - no integrity verification is done Partial - integrity verification is done using a weak method* Yes - Verification is done using a sufficiently secure method\r\n\r\nIntegrity verification is offered for [binary artifacts](https://github.com/apple/swift-evolution/blob/master/proposals/0272-swiftpm-binary-dependencies.md#checksum-computation), using SHA256. Conventional, source-based dependencies don't currently offer any integrity verification functionality. \r\n\r\nThis isn't currently listed for Swift Package Manager. 
Should this entry be set to **No**, **Partial**, or something else?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/23", @@ -18456,10 +19086,11 @@ "pk": 647, "fields": { "nest_created_at": "2024-09-11T21:08:37.437Z", - "nest_updated_at": "2024-09-11T21:08:37.437Z", + "nest_updated_at": "2024-09-13T16:10:29.385Z", "node_id": "I_kwDOCsQxbc5VqGws", "title": "Track if a published version can be deleted", "body": "Very useful project. Thank you.\r\n\r\nThere was a twitter thread a few months ago, around the time of [this incident](https://twitter.com/balloob/status/1545510244381446144).\r\n\r\n\"which package ecosystems protect downstream libs/apps from a published version of an upstream lib being deleted\"\r\nhttps://twitter.com/mrinal/status/1546250871784108033\r\n\r\nThe thread captured, state of this for:\r\n\r\n- Rust https://twitter.com/mrinal/status/1546250872711024641\r\n- JS https://twitter.com/mrinal/status/1546250873851875329\r\n- Go https://twitter.com/mrinal/status/1546250874908921858\r\n- Erlang/Elixir https://twitter.com/mrinal/status/1546250875961675776\r\n- Java / Scala/ other JVM https://twitter.com/mrinal/status/1546250876997685249\r\n- Ruby https://twitter.com/mrinal/status/1546250878008537089\r\n- Swift https://twitter.com/mrinal/status/1546250878977380353\r\n- Python https://twitter.com/mrinal/status/1546250879853928448\r\n- Github Packages https://twitter.com/mrinal/status/1546265142580547584\r\n- Docker Hub https://twitter.com/mrinal/status/1546266861989351424\r\n\r\nI [wished at the time](https://twitter.com/mrinal/status/1546270715334180866) for something like packman and today someone pointed me packman :)\r\n\r\nIf there is interest in tracking this aspect, I'd be happy to send pull request.\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/packman/issues/24", @@ -18482,10 +19113,11 @@ "pk": 648, "fields": { "nest_created_at": "2024-09-11T21:08:53.173Z", - "nest_updated_at": "2024-09-11T21:08:53.173Z", + "nest_updated_at": "2024-09-13T04:19:24.722Z", "node_id": "MDU6SXNzdWU1NzQyMjQ5MTI=", "title": "Make hook feature for report", "body": "Dot env for send data:\r\n`hook=\"URL$text\"`", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/D4N155/issues/11", @@ -18514,6 +19146,7 @@ "node_id": "MDU6SXNzdWU2OTE2MTQ1MzM=", "title": "Não está salvando a lista de textos estaticos apartir da forma interativa", "body": "Bct q ódio, resolver esse B.O né", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/D4N155/issues/17", @@ -18536,10 +19169,11 @@ "pk": 650, "fields": { "nest_created_at": "2024-09-11T21:09:12.077Z", - "nest_updated_at": "2024-09-11T21:09:12.077Z", + "nest_updated_at": "2024-09-13T16:10:49.058Z", "node_id": "MDU6SXNzdWU2Njg2ODQ3OTY=", "title": "Link to OWASP project page gives 404", "body": "It's shown in github's main page: https://www.owasp.org/index.php/OWASP_Vulnerable_Web_Application\r\nThanks", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Vulnerable-Web-Application/issues/4", @@ -18566,6 +19200,7 @@ "node_id": "MDU6SXNzdWU4NTUzNDEzMTE=", "title": "mysqli_error()", "body": "https://github.com/OWASP/Vulnerable-Web-Application/blob/c0f2689f4adc3dab4e310a4c709bbee9386c6b02/SQL/sql3.php#L43\r\nshould be mysqli_error()", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Vulnerable-Web-Application/issues/8", @@ 
-18592,6 +19227,7 @@ "node_id": "MDU6SXNzdWU2MDYxODk1MTY=", "title": "Maintain and develop new test cases for Cloud Testing guide", "body": "Hi everyone,\r\n\r\nI have seen this repo not update anymore from Nov 29, 2019. I have some question below:\r\n\r\n- How can I contribute for this repo?\r\n- Where I will find a template for test cases?\r\n- I think *Cloud Security Testing Guide* is better than this name now\r\n\r\nThank for your response.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-cstg/issues/3", @@ -18614,10 +19250,11 @@ "pk": 653, "fields": { "nest_created_at": "2024-09-11T21:09:26.043Z", - "nest_updated_at": "2024-09-11T21:09:26.043Z", + "nest_updated_at": "2024-09-13T16:10:58.503Z", "node_id": "I_kwDOClJnec5DVrCk", "title": "Project Development", "body": "**What would you like added?**\r\nThis is 2 years from the last Project update. Seem like this project is not developed anymore. So please assign me to join as a leader and contribute for this one. \r\n\r\nWould you like to be assigned to this issue?\r\n- [x] Assign me, please!\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-cstg/issues/6", @@ -18642,10 +19279,11 @@ "pk": 654, "fields": { "nest_created_at": "2024-09-11T21:09:35.034Z", - "nest_updated_at": "2024-09-11T21:09:35.034Z", + "nest_updated_at": "2024-09-13T16:11:05.605Z", "node_id": "MDU6SXNzdWU2MTkzMTkwMDM=", "title": "Update Contribution Guidelines", "body": "Enhance the 'CONTRIBUTING.md' doc with more details", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Intelligent-Intrusion-Detection-System/issues/7", @@ -18677,6 +19315,7 @@ "node_id": "MDU6SXNzdWU2MTkzMjA2NjQ=", "title": "Add coding guidelines doc", "body": "Add a document describing the coding guidelines and standards that the developers of this project should follow", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Intelligent-Intrusion-Detection-System/issues/8", @@ -18708,6 +19347,7 @@ "node_id": "MDU6SXNzdWU2NTgzNzE3OTE=", "title": "Add a Feature Extractor for input Pcap files", "body": "A Feature Extractor, which would extract all the necessary features from the Input Pcap file and save it in a csv format.\r\nThe list of features could be found [here ](https://kdd.ics.uci.edu/databases/kddcup99/task.html)\r\n\r\nFor reference, Check this [Repo](https://github.com/nrajasin/Network-intrusion-dataset-creator)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Intelligent-Intrusion-Detection-System/issues/14", @@ -18734,6 +19374,7 @@ "node_id": "MDU6SXNzdWU4MjA5NDc3MDY=", "title": "Wiki page have some bugs", "body": "**Describe the bug**\r\nThe view of the wiki page is broken. The HTML tags are visible rather than the sentence also the image is also giving a 404 error. Also, the resources which are provided are repeating also all the links are broken and pointing to just the home of the Github repo.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
We just have to simply modify the wiki page on GitHub.\r\n\r\n**Expected behavior**\r\nThe page should not contain HTML tags and the image should be visible.\r\n\r\n**Screenshots**\r\n![Screenshot from 2021-03-03 15-19-29](https://user-images.githubusercontent.com/44300735/109787672-7b143e80-7c34-11eb-9181-228320c5a54d.png)\r\n![Screenshot from 2021-03-03 15-19-34](https://user-images.githubusercontent.com/44300735/109787684-7ea7c580-7c34-11eb-9413-fdc111d10074.png)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 18.04\r\n - Browser: Chrome\r\n\r\n**Additional context**\r\n@hardlyhuman I would like to fix it, also will you please give some details on the image that has to be there.\r\nThanks\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Intelligent-Intrusion-Detection-System/issues/24", @@ -18760,6 +19401,7 @@ "node_id": "MDU6SXNzdWU4MzA4NzQyNTE=", "title": "requirements.txt Not Present", "body": "Django CI/build is failing as there is no **requirements.txt** file. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Intelligent-Intrusion-Detection-System/issues/26", @@ -18782,10 +19424,11 @@ "pk": 659, "fields": { "nest_created_at": "2024-09-11T21:09:45.435Z", - "nest_updated_at": "2024-09-11T21:09:45.436Z", + "nest_updated_at": "2024-09-13T16:11:10.336Z", "node_id": "MDU6SXNzdWU0MTM5MzE3NjQ=", "title": "Add Secure-Header-Checker tool written in python", "body": "Hi, Can I make PR that contain Secure-Header-Checker tool written in python for this project?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Secure-Headers/issues/1", @@ -18812,6 +19455,7 @@ "node_id": "MDU6SXNzdWU0NjQ3Mzk0NTI=", "title": "Information about tools and payloads", "body": "Hi, team! Thanks for the great project. I think it would be useful to add more information about different tools that can be used to test an application and detect security issues. For example, Arachni, ZAP, Burp Suite, .etc. One more thing that would be useful for application developers, QA engineers, security experts - links or examples of possible payloads that can be used to test an application API. There is a cool repository, called PayloadAllTheThings that contains a lot of payload examples to use during a security testing process. I can provide more information about tools and useful sources if it needed.\r\n\r\nBest regards,\r\nIhor", + "summary": "The issue suggests enhancing the project by including more information on various tools for application testing and security issue detection, such as Arachni, ZAP, and Burp Suite. Additionally, it proposes adding links or examples of potential payloads for API testing, referencing the \"PayloadAllTheThings\" repository as a valuable resource. The author is open to providing further information about tools and sources if required. \n\nTo address this issue, consider compiling a list of recommended tools and payloads along with relevant documentation or examples.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/17", @@ -18842,6 +19486,7 @@ "node_id": "MDU6SXNzdWU0OTY2ODQ0OTc=", "title": "Translation es ", "body": "Hi, I found this resources super helpful and I would like non-english speakers to be able to benefit from this as well. \r\n\r\nDo you have translation projects or do you accept translation PRs? 
\r\nI see the sources are in `2019/en`, so I guess I could create a `2019/es` for a Spanish translation to work in. \r\n\r\nWould that be something you'd want/like? I'm willing to make the Spanish translation, but let me know if there is an established way to contribute on this matter (maybe there is a separated repo for translations or you already have a Spanish version that I couldn't find. ", + "summary": "A user has expressed interest in translating resources into Spanish to make them accessible for non-English speakers. They are inquiring whether there are existing translation projects or if translation pull requests (PRs) are accepted. The user proposes creating a `2019/es` folder for the Spanish translation and is willing to contribute. Clarification is sought on how to proceed with this contribution, including any established protocols or existing resources. \n\nTo address this, it would be beneficial to provide information on the translation process, any existing repositories for translations, or confirmation if a Spanish version is already available.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/22", @@ -18874,6 +19519,7 @@ "node_id": "MDU6SXNzdWU2Nzg3MDkzODk=", "title": "Translation German", "body": "Hey!\r\n\r\nI really appreciate your efforts and think this projects should be translated into as many languages as possible.\r\nI'd like to provide a translation into german.\r\n\r\nCheers,\r\nch4rl353y", + "summary": "A user has expressed appreciation for the project's efforts and is interested in contributing by providing a German translation. It may be beneficial to outline the process for submitting translations and any specific guidelines that should be followed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/36", @@ -18905,6 +19551,7 @@ "node_id": "MDU6SXNzdWU5MzQ4NjU0NDE=", "title": "Translation Dutch", "body": "Hi everybody,\r\n\r\nWe want to translate API-Security to the Dutch language, any contributors are welcome!", + "summary": "The issue discusses the need for translating the API-Security documentation into Dutch and invites contributors to assist with this task. If interested, potential contributors should offer their help in the translation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/44", @@ -18936,6 +19583,7 @@ "node_id": "I_kwDOCdd2e85AjLIG", "title": "Translation Chinese", "body": "Hi Team, \r\n\r\nGlad to see this project, I am an engineer in this area, I could contribute to the Chinese version.\r\n\r\n\r\n\r\n", + "summary": "A user is expressing interest in contributing to the project by providing a Chinese translation. It would be beneficial to coordinate with this individual to facilitate the translation process and integrate their contributions effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/46", @@ -18967,6 +19615,7 @@ "node_id": "I_kwDOCdd2e85PJnlO", "title": "Turkish Translation", "body": "Hey!\r\n\r\nFirst of all thanks for this outstanding repository about API security.\r\nI would be happy to contribute to the translation of this repository into **Turkish**.\r\n\r\n\r\n![htp](https://i.giphy.com/media/FnGJfc18tDDHy/giphy.webp)", + "summary": "A user has expressed interest in contributing to the translation of the repository into Turkish. 
To move forward, it would be beneficial to establish a process for managing translations and possibly provide guidelines for contributors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/58", @@ -18998,6 +19647,7 @@ "node_id": "I_kwDOCdd2e85aRUqH", "title": "Hindi Translation", "body": "Hey there,\r\nMore than happy to do translation in hindi. I'm developer from India", + "summary": "A user has expressed their willingness to contribute by providing Hindi translations for the project. It may be beneficial to coordinate with them to outline specific areas or content that require translation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/59", @@ -19029,6 +19679,7 @@ "node_id": "I_kwDOCdd2e85glIOu", "title": "Odata with EF and .Net core Security risks with Front End queries through web components", "body": "A demo would be good to show Odata with EF and .Net core Security risks while we query using expression trees that hold any query.'\r\nWould the security be compromised at some point in operation?", + "summary": "The issue discusses potential security risks associated with using OData with Entity Framework in a .NET Core application, particularly when handling front-end queries through web components. The author suggests that a demonstration would be beneficial to illustrate these risks, especially in the context of expression trees that can represent any query. The main concern is whether the security of the application could be compromised during operations involving these queries. It may be necessary to explore and implement security measures to mitigate these risks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/99", @@ -19055,6 +19706,7 @@ "node_id": "I_kwDOCdd2e85x6X4h", "title": "Contradictory risk classification for \"Unsafe Consumption of APIs\"", "body": "The *Exploitability* of [API10:2023][1] is graded with the highest rating of `easy`.\r\nAt the same time, the corresponding textual explanation actually tells the opposite, that exploitation of this should be rather hard:\r\n\r\n```\r\nExploiting this issue requires attackers to identify and potentially compromise other APIs/services the target API integrated with. Usually, this information is not publicly available or the integrated API/service is not easily exploitable.\r\n```\r\n\r\n[1]: https://owasp.org/API-Security/editions/2023/en/0xaa-unsafe-consumption-of-apis/", + "summary": "There is a contradiction in the risk classification for \"Unsafe Consumption of APIs\" where the exploitability is rated as `easy`, while the explanation suggests that exploitation is actually difficult due to the need for attackers to identify and compromise other integrated APIs/services, which are typically not publicly accessible or easily exploitable. \n\nTo resolve this issue, a review of the exploitability rating and the accompanying explanation is needed to ensure they align accurately.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/123", @@ -19070,8 +19722,8 @@ "repository": 1067, "assignees": [], "labels": [ - 159, - 160 + 160, + 159 ] } }, @@ -19084,6 +19736,7 @@ "node_id": "I_kwDOCdd2e85yAnUc", "title": "Persian Translation for 2023", "body": "Hi Guys\r\nI could contribute to translate the API TOP10 2023RC to Farsi/Persian & would want to know your opinion on this matter. 
We did the Persian translation for 2019 version as well.\r\n@PauloASilva \r\n", + "summary": "A request has been made to contribute a Persian translation for the API TOP10 2023RC, building on a previous translation effort for the 2019 version. Feedback and opinions on this contribution are being sought. It would be beneficial to assess the feasibility and scope of this translation project.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/124", @@ -19101,8 +19754,8 @@ 298 ], "labels": [ - 156, - 160 + 160, + 156 ] } }, @@ -19115,6 +19768,7 @@ "node_id": "I_kwDOCdd2e856DNXN", "title": "Translation to Portuguese (pt-PT) for 2023 version", "body": "Hello,\r\nI want to help with the translation to Portuguese (pt-PT) for 2023 version.\r\nI've followed the instructions detailed here: [https://github.com/OWASP/API-Security/issues/124#issuecomment-1737308376](https://github.com/OWASP/API-Security/issues/124#issuecomment-1737308376)\r\nAnd I'm working on the translation here: [https://github.com/RiuSalvi/API-Security/tree/translation/pt-pt](https://github.com/RiuSalvi/API-Security/tree/translation/pt-pt)\r\n\r\nBest regards,\r\nRui Silva", + "summary": "The issue discusses the intention to assist with the translation of the 2023 version into Portuguese (pt-PT). The contributor has followed the provided instructions and is currently working on the translation in a designated repository. To progress, further collaboration or review of the translation work may be needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/127", @@ -19132,8 +19786,8 @@ 299 ], "labels": [ - 156, - 160 + 160, + 156 ] } }, @@ -19146,6 +19800,7 @@ "node_id": "I_kwDOCdd2e86Eehb0", "title": "Translation to brasilian portuguese (pt-BR)", "body": "Hi Guys\r\nI could contribute to translate the API TOP10 2023 to Brasilian Portuguese (pt-BR). \r\n\r\nRegards,\r\nLuca Regne", + "summary": "A user has expressed willingness to contribute by translating the API TOP10 2023 to Brazilian Portuguese (pt-BR). It may be useful to review the translation process and guidelines to facilitate this contribution.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/131", @@ -19161,8 +19816,8 @@ "repository": 1067, "assignees": [], "labels": [ - 156, - 160 + 160, + 156 ] } }, @@ -19171,10 +19826,11 @@ "pk": 672, "fields": { "nest_created_at": "2024-09-11T21:10:16.989Z", - "nest_updated_at": "2024-09-12T02:32:00.271Z", + "nest_updated_at": "2024-09-13T16:26:39.350Z", "node_id": "I_kwDOCdd2e86UP58v", "title": "Reference to OWASP Risk Rating Methodology", "body": "https://github.com/OWASP/API-Security/blob/ef8e6b306de85950cd6df8a30671566e5bd134e3/editions/2023/en/0xd0-about-data.md?plain=1#L68C1-L68C67\r\n\r\nOWASP Risk Rating Methodology (https://owasp.org/www-community/OWASP_Risk_Rating_Methodology) references to OWASP Risk Assessment Framework (https://owasp.org/www-project-risk-assessment-framework/).\r\n\r\nProbably due to the changes on OWASP project pages.", + "summary": "The issue highlights the need for updated references to the OWASP Risk Rating Methodology and the OWASP Risk Assessment Framework within the provided documentation link. It suggests that changes to OWASP project pages may have affected the existing links. 
To resolve this, the documentation should be reviewed and corrected to ensure accurate and current references are provided.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/API-Security/issues/133", @@ -19201,6 +19857,7 @@ "node_id": "MDU6SXNzdWU4MTI2NTEzNDc=", "title": "New CS proposal: React Security CheatSheet", "body": "## What is the proposed Cheat Sheet about?\r\nBuilding secure React applications by avoiding common vulnerabilities.\r\n\r\n## What security issues are commonly encountered related to this area?\r\nCWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')\r\nCWE-602: Client-Side Enforcement of Server-Side Security\r\nCWE-603: Use of Client-Side Authentication\r\nCWE-22: Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')\r\nCWE-89: Improper Neutralization of Special Elements used in an SQL Command\r\nCWE-94: Improper Control of Generation of Code ('Code Injection')\r\n\r\n## What is the objective of the Cheat Sheet?\r\nExamples of vulnerable code and how to fix it. \r\n\r\n## What other resources exist in this area?\r\nI've written about this topic, and made videos related to it in the past. I want to make some new content that goes deeper and broader here.\r\n\r\nhttps://www.youtube.com/watch?v=VtNotePFuJY\r\nhttps://snyk.io/blog/10-react-security-best-practices/\r\nhttps://medium.com/javascript-security/avoiding-xss-in-react-is-still-hard-d2b5c7ad9412\r\nhttps://medium.com/javascript-security/avoiding-xss-via-markdown-in-react-91665479900\r\nhttps://www.synopsys.com/software-integrity/training/software-security-courses/react-js-security.html", + "summary": "A new proposal has been made for a React Security Cheat Sheet aimed at helping developers build secure React applications by addressing common vulnerabilities. Key issues identified include cross-site scripting (CWE-79), client-side security enforcement (CWE-602), client-side authentication (CWE-603), path traversal (CWE-22), SQL command vulnerabilities (CWE-89), and code injection (CWE-94). The objective is to provide examples of vulnerable code alongside solutions for fixing these issues. The proposal also references existing resources and content that delve into these security topics. To move forward, it would be beneficial to compile and organize these examples and solutions into a comprehensive guide.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/543", @@ -19232,6 +19889,7 @@ "node_id": "MDU6SXNzdWU4NjY5Mjk3Nzk=", "title": "Update: Vulnerable Dependency Management Cheat Sheet with Dependency Confusion", "body": "## What is missing or needs to be updated?\r\n\r\nI have found this [post](https://medium.com/@alex.birsan/dependency-confusion-4a5d60fec610) about *Dependency Confusion* attack and I think that it can be interesting to add a section about protection against this attack in the [Vulnerable Dependency Management Cheat Sheet](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.md).\r\n\r\n## How should this be resolved?\r\n\r\nI propose to add a small section showing some protection that can applied.\r\n\r\nThanks a lot in advance and also thanks a lot for your amazing work on this project ❤️ \r\n", + "summary": "The issue suggests adding a section on \"Dependency Confusion\" attacks to the Vulnerable Dependency Management Cheat Sheet. 
It references a blog post that provides insights on this topic and proposes including protective measures against such attacks. An update should be made to the cheat sheet to incorporate this information.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/641", @@ -19263,6 +19921,7 @@ "node_id": "MDU6SXNzdWU5MjY0MTcwMTM=", "title": "Update: [Multifactor Authentication Cheat Sheet]: Further info about TOTP secret-key storage", "body": "## What is missing or needs to be updated?\r\nThere is no info about the proper way to store users' secrets for TOTP MFA. Should these secrets be stored in plaintext or be 2-way encrypted? If encrypted should it use a dedicated key on the server or use something like the user's password or email address or username?\r\n\r\n## How should this be resolved?\r\nA brief subsection or bullet point, or perhaps a link to a separate cheat-sheet, describing the industry-standard (if such exists) for how to store the users' secret-keys for TOTP authentication. Or at least a line saying whether these secrets need to be encrypted or not.\r\n\r\nI don't know the answer to the above question, so I won't propose any text.", + "summary": "The issue highlights the absence of guidance on the proper storage methods for users' secrets related to TOTP MFA. Specifically, it questions whether these secrets should be stored in plaintext or encrypted, and if encrypted, what key should be used for encryption. \n\nTo resolve this, it is suggested to add a subsection or bullet point to the cheat sheet that outlines the industry standards for storing TOTP secret keys, including recommendations on encryption practices. If no definitive answer exists, at minimum, a statement regarding the necessity of encryption should be included.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/678", @@ -19293,6 +19952,7 @@ "node_id": "I_kwDOCbL1IM5C5qWm", "title": "Update: Secrets_Management_CheatSheet.", "body": "## What is missing or needs to be updated?\r\n\r\nThe following tasks require to be executed post-mvp of the cheatsheet as agreed with various team-members:\r\n\r\n- [ ] Further extend and re-evaluate the concepts of https://github.com/OWASP/CheatSheetSeries/pull/842\r\n- [ ] show more how you can create an architecture that allows for rotation in examples (so not just tell, but show examples)\r\n- [ ] show how passwordless (openID connect) can help, but tokens need security as well (might be a cheatsheet for to refer to?)\r\n- [ ] The ease of onboarding needs to be further expanded as explained by @dominikdesmit https://owasp.slack.com/archives/C02LSHXKVU5/p1643453874955679?thread_ts=1643442691.915449&cid=C02LSHXKVU5\r\n\r\n\r\n\r\nPlease note that I only file this issue for tracking as requested by multiple team-members for now, but will not have the time to pick this up timely myself.", + "summary": "The issue outlines several tasks that need to be addressed for the Secrets Management CheatSheet after the MVP phase. 
The tasks include:\n\n- Extending and re-evaluating concepts from a specific pull request.\n- Providing examples of architecture that facilitates rotation.\n- Demonstrating how passwordless authentication (like OpenID Connect) can enhance security, while also addressing token security.\n- Expanding on the ease of onboarding, as discussed by a team member.\n\nThis issue serves as a tracking mechanism for these updates, indicating that further contributions are needed from the team to progress on these items.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/845", @@ -19323,6 +19983,7 @@ "node_id": "I_kwDOCbL1IM5FfdWK", "title": "New Security with CORS CS ", "body": "## What is missing or needs to be updated?\r\nThe [HTML5 security cheat sheet](https://cheatsheetseries.owasp.org/cheatsheets/HTML5_Security_Cheat_Sheet.html) currently says\r\n\r\n> Keep in mind that CORS does not prevent the requested data from going to an unauthorized location. It's still important for the server to perform usual [CSRF](https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html) prevention.\r\n\r\nIt is absolutely true that CSRF protection is required for CORS, but it's a little imprecise. Specifically, the [Cross-Site Request Forgery Prevention Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#cross-site-request-forgery-prevention-cheat-sheet) lists the double-submit cookie as the first line of defense, but that pattern is not possible in a CORS environment. (Unless the Origin and request domains happen to be subdomains of a shared parent on which the cookie could be set.)\r\n\r\n## How should this be resolved?\r\n\r\nI suggest clarifying -- in either or both of the cheat sheets -- which CSRF protection mechanisms are best suited to CORS environments.\r\n\r\nMixMax's [Using CORS policies to implement CSRF protection](https://www.mixmax.com/engineering/modern-csrf) has some additional information on using Origin verification to protect against CSRF attacks on CORS requests.", + "summary": "The issue highlights a gap in the current HTML5 security cheat sheet regarding the relationship between CORS and CSRF protection. It points out that while CSRF protection is necessary in CORS scenarios, the existing guidance may not adequately address the limitations of the double-submit cookie technique in such environments. The author suggests that the cheat sheets should clarify which CSRF protection mechanisms are most effective for CORS and references additional resources for further information on this topic. \n\nTo resolve this, it would be beneficial to update the cheat sheets to include guidance on appropriate CSRF protection strategies specifically tailored for CORS contexts.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/875", @@ -19353,6 +20014,7 @@ "node_id": "I_kwDOCbL1IM5Ww4_8", "title": "Update: Cross-Site Request Forgery Prevention Cheat Sheet", "body": "## What is missing or needs to be updated?\r\n\r\nCross-Site Request Forgery attacks occur because untrusted browser code can cause requests to be sent to a vulnerable server that are \"special\" in a way that the attacker could not send from their own machines. Typically this \"special\"-ness is that they contain cookies from a targeted user's browser. 
Less commonly, this could mean they contain basic auth credentials or that the server makes authorization assumptions based on the client's network (eg, explicitly looking at the client's IP address to make decisions, or by putting the server on a private network and assuming that any client talking to this server is authorized).\r\n\r\nThe mitigations discussed in the cheat sheet all assume that these \"special\" request qualities must exist in your system. However, an entirely different approach to solving CSRF is to avoid giving special treatment based on cookies, basic auth, and network properties. This approach may be appropriate for web servers that are primarily serving \"API-style\" traffic and which websites only communicate with via AJAX-style calls.\r\n\r\nFor example, a server that makes authentication decisions only based on a custom HTTP header (perhaps set from a value stored in browser `localStorage`) or a value in the body of a POST and not on cookies or network should be considered successfully defended against CSRF. Developers who have built servers with this property should not feel like they need to also set up token-based mitigation or (if #1010 is accepted) header-based mitigation.\r\n\r\n\r\n## How should this be resolved?\r\n\r\nA new top-level section should be added describing \"don't care about cookies/etc\" as an appropriate mitigation technique, with the drawback that it is not helpful if you need your endpoint to be accessible via HTML `
`s.", + "summary": "The issue highlights a gap in the Cross-Site Request Forgery (CSRF) Prevention Cheat Sheet regarding the handling of CSRF attacks. It notes that current mitigations assume the presence of \"special\" request characteristics, such as cookies or network properties. The proposal suggests adding a new section that discusses an alternative approach, where servers can avoid relying on these elements for authentication, particularly in API-style traffic scenarios. This approach could involve using custom HTTP headers or values in the request body instead. However, it also mentions that this method may not be suitable for endpoints accessible via HTML forms. \n\nTo address this issue, a new section should be added to the cheat sheet that outlines this alternative mitigation technique, along with its limitations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1011", @@ -19383,6 +20045,7 @@ "node_id": "I_kwDOCbL1IM5aLtfG", "title": "Update: Kubernetes Security", "body": "## What is missing or needs to be updated?\r\nIt is still pointing to Pod Security Policies, but they were removed in 1.25 and replaced with [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/)\r\n\r\n## How should this be resolved?\r\nReview throughout for PSP references and update them, perhaps by having old guidance < 1.25 and newer guidance.", + "summary": "The issue highlights that the documentation is still referencing Pod Security Policies (PSP), which were removed in Kubernetes version 1.25, and should be updated to reflect the current use of Pod Security Admission. It suggests a thorough review of the documentation to replace PSP references with relevant guidance, including distinctions between pre-1.25 and post-1.25 practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1041", @@ -19413,6 +20076,7 @@ "node_id": "I_kwDOCbL1IM5bmtmv", "title": "Improve WCAG compliance", "body": "Steps:\r\n- Load https://wave.webaim.org/report#/https://cheatsheetseries.owasp.org/index.html\r\n\r\nActual results:\r\n- 11 Errors\r\n- 9 Contrast Errors\r\n- 7 Alerts \r\n- 9 Features\r\n- 16 Structural Elements\r\n- 15 ARIA\r\n\r\nExpected results:\r\nThe website is WCAG compliant", + "summary": "The issue reports the need for improvements to meet WCAG compliance on a specific website. Currently, the site has multiple accessibility issues, including 11 errors, 9 contrast errors, 7 alerts, among others. The expected outcome is to achieve full WCAG compliance. To address this, a review and remediation of the identified issues are necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1058", @@ -19439,6 +20103,7 @@ "node_id": "I_kwDOCbL1IM5e5ucY", "title": "Update: XSS Prevention Cheat Sheet to show common failed anti-patterns attempted as XSS defense", "body": "## What is missing or needs to be updated?\r\nThe current XSS Prevention Cheat Sheet is great at describing what developers _should_ do to prevent XSS vulnerabilities and that definitely should be (and is) its primary focus. 
But despite that, there is a lot of bad advice elsewhere on the Internet (e.g., Stack Overflow is one such common source of misinformation out there) where developers ignore the advice in this XSS Prevention CS in search of a short cut so that they can avoid providing specific output encoding at or near the hundreds or more given XSS sinks that SAST or DAST or manual code reviews discover.\r\n\r\nInstead of using the approaches recommended in the XSS Prevention CS, I often find developers trying to solve XSS vulnerabilities with a single, simple, stop-gap solution, such as _only_ using CSP headers as a defense or _only_ using HTTP interceptors to provide HTML sanitization or output encoding in a single specific context (usually HTML). While those approaches might be useful as a defense-in-depth layer (depending on the situation), the never actually fully remediate the XSS vulnerabilities.\r\n\r\nIf this were just a few cases it would be easy enough to deal with, but for every case where I find these misguided approaches during code review, or every time that @xeno6696 or I correct someone asking an ESAPI-related XSS question on Stack Overflow, I am confident that we are missing several others. However, both Matt and I have seen these approaches both in code and discussed as \"solutions\" frequently enough to realize that it is a common anti-pattern.\r\n\r\nTherefore, I think it would be useful to have a small section in the XSS Prevention CS to call out how **_NOT_** to defend against XSS vulnerabilities.\r\n\r\n## How should this be resolved?\r\nTo partially address this problem, I have wrote up the detailed ESAPI wiki post, [XSS Defense: No Silver Bullets](https://github.com/ESAPI/esapi-java-legacy/wiki/XSS-Defense:-No-Silver-Bullets) that I now refer developers I find doing this (which are a lot) to that wiki page. However, as I mentioned in https://github.com/OWASP/CheatSheetSeries/issues/517#issuecomment-1435757162, I think this advice would be more effective if the essential parts of it can be distilled and included in the XSS Prevention Cheat Sheet where I think many more developers look first.\r\n\r\nSo, I am proposing to create a PR that will highlight the main points from the ESAPI wiki page in the XSS Prevention CS and then link to it for anyone wanting to see more details.", + "summary": "The XSS Prevention Cheat Sheet currently focuses on best practices for preventing XSS vulnerabilities, but it lacks a section addressing common anti-patterns that developers mistakenly use as solutions. Many developers resort to ineffective measures, such as relying solely on CSP headers or HTTP interceptors, which do not fully address XSS issues. To improve the cheat sheet, it is suggested to add a section outlining these ineffective defenses and to distill key points from an existing detailed ESAPI wiki post on XSS defense. A pull request (PR) is proposed to include these insights in the cheat sheet and provide a link to the detailed information for further reference.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1092", @@ -19470,6 +20135,7 @@ "node_id": "I_kwDOCbL1IM5gU0sO", "title": "Update: CSRF prevention cheat sheet to offer more detail on SameSite cookie limitations", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\nWith regards to SameSite cookies, the cheat sheet says this:\r\n\r\n> It is important to note that this attribute should be implemented as an additional layer defense in depth concept. 
This attribute protects the user through the browsers supporting it, and it contains as well 2 ways to bypass it as mentioned in the following [section](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.3.7.1). This attribute should not replace having a CSRF Token. Instead, it should co-exist with that token in order to protect the user in a more robust way.\r\n\r\nThe 2 ways to bypass described in the linked document are:\r\n\r\n> 1. Attackers can still pop up new windows or trigger top-level\r\n navigations in order to create a \"same-site\" request (as described in [section 2.1](https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-rfc6265bis-02#section-2.1)), which is only a speedbump along the\r\n road to exploitation.\r\n\r\n> 2. Features like `` [[prerendering](https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-rfc6265bis-02#ref-prerendering)] can be\r\n exploited to create \"same-site\" requests without the risk of user\r\n detection.\r\n\r\nHowever, other commentary I've found (e.g. [this SO post](https://security.stackexchange.com/questions/263135/are-these-samesite-vulnerabilities-still-present-today)) suggests that those bypass approaches might be dated and/or limited in scope.\r\n\r\n## How should this be resolved?\r\n\r\n\r\nIt would be nice if the cheat sheet addressed these limitations in more detail. Can SameSite be a first-class mitigation mechanism under certain scenarios (e.g. no domain sharing, no mutating GET requests)? What attacks are still possible?\r\n\r\n\r\n", + "summary": "The issue discusses the need for a more detailed update to the CSRF prevention cheat sheet, specifically regarding the limitations of SameSite cookies. The current cheat sheet mentions that SameSite should complement CSRF tokens for better protection but does not elaborate on the specific bypass methods outlined in the referenced document. It highlights two potential bypass techniques that may not reflect current understanding and questions whether SameSite could serve as a primary defense in certain contexts. The resolution suggests enhancing the cheat sheet by clearly addressing these limitations, exploring scenarios where SameSite might be effective, and identifying remaining vulnerabilities.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1101", @@ -19500,6 +20166,7 @@ "node_id": "I_kwDOCbL1IM5iZsDH", "title": "Update: [CSRF] Mention that non-signed Double Submit Cookie tokens can be generated client & server side.", "body": "## What is missing or needs to be updated?\r\n[OWASP Double Submit cookie](https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#double-submit-cookie) doesn’t mention whether the token should be generated server or client side. However, it does so for the Synchronizer token pattern. 
\r\n\r\nThis has historically created many doubts:\r\n- https://stackoverflow.com/questions/2561564/client-generated-double-submit-cookie-cross-site-request-forgery-prevention\r\n- https://stackoverflow.com/questions/69526826/is-generating-csrf-token-on-the-front-end-a-bad-idea\r\n- https://stackoverflow.com/questions/74014826/generating-csrf-token-on-frontend-in-my-react-app\r\n- https://www.reddit.com/r/AppSecurity/comments/dscjh0/can_i_generate_csrf_tokens_on_the_client_side_in/\r\n\r\nUsing non-signed tokens, there's no difference between generating the token server or client side, since both:\r\n- can generate a cryptographical random value (Client side with the [Web Crypto API](https://developer.mozilla.org/en-US/docs/Web/API/Crypto/randomUUID))\r\n- set the token in a non-`HttpOnly` cookie\r\n\r\n\r\n## How should this be resolved?\r\nThe [Double Submit Cookie](https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#double-submit-cookie) section should state more specifically that the token can be generated both server and client side for non-signed tokens, but only server-side for signed token, thus eliminating confusion.\r\n", + "summary": "The issue highlights a gap in the OWASP Double Submit Cookie documentation regarding the generation of tokens. Specifically, it notes that the current information does not clarify whether tokens should be generated on the client or server side, leading to confusion among users. The proposal is to update the documentation to explicitly state that non-signed tokens can be generated on either side, while signed tokens should only be generated server-side. This clarification aims to resolve ongoing uncertainties reflected in various online discussions. \n\nTo address this issue, the documentation should be revised to include this distinction about token generation for both signed and non-signed tokens.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1111", @@ -19530,6 +20197,7 @@ "node_id": "I_kwDOCbL1IM5i2P26", "title": "Update: [Secure Product Design] Merge proposed Secure Design Principles Cheat Sheet and update", "body": "## What is missing or needs to be updated?\r\n\r\nThe Secure Product Design Cheat Sheet is a good start at describing how to incorporate some secure design principles into the design process and even lists out some secure design principles. The proposed Secure Design Principles Cheat Sheet (https://github.com/OWASP/CheatSheetSeries/issues/723) has a better list of secure design principles.\r\n\r\n## How should this be resolved?\r\n\r\nMerge the proposed Secure Design Principles Cheat Sheet (https://github.com/OWASP/CheatSheetSeries/issues/723) into the existing Secure Product Design Cheat Sheet\r\n", + "summary": "The issue highlights the need to enhance the Secure Product Design Cheat Sheet by incorporating a more comprehensive list of secure design principles from the proposed Secure Design Principles Cheat Sheet. 
The resolution involves merging these two documents to improve the guidance provided for secure design practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1114", @@ -19560,6 +20228,7 @@ "node_id": "I_kwDOCbL1IM5jW7Kn", "title": "Logging Vocabulary Cheetsheet", "body": "I propose that malicious_direct_reference failure should be critical since it's a top-tier data theft attack.", + "summary": "The issue suggests that the failure associated with malicious_direct_reference should be classified as critical due to its significant risk of data theft. It implies that action is needed to elevate the severity of this particular failure in the logging vocabulary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1118", @@ -19591,6 +20260,7 @@ "node_id": "I_kwDOCbL1IM5jviCG", "title": "input_validation_fail[:field,userid]", "body": "When validation discrete list items (like a yes/no control or a list of countries) and someone tries to select an item that is not on the list, that input validation failure is totally critical and can only be an attack activity. Maybe a new event is needed?\r\n\r\ncc Jet Anderson", + "summary": "The issue discusses the need for improved input validation for discrete list items, such as yes/no options or country selections. It highlights that selecting an item not on the list should be considered a critical validation failure, potentially indicating an attack. The suggestion is to create a new event for handling this specific type of input validation failure. It would be beneficial to implement a solution that addresses this concern effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1121", @@ -19620,6 +20290,7 @@ "node_id": "I_kwDOCbL1IM5k_UbV", "title": "logging vocab cheatsheet", "body": "Suggest a new logging vocab entry to detect SQL errors (potential SQLi)", + "summary": "The issue proposes adding a new entry to the logging vocabulary specifically aimed at detecting SQL errors that may indicate potential SQL injection vulnerabilities. To address this, the logging system needs to be updated to include patterns or keywords that can effectively capture and log these types of errors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1127", @@ -19650,6 +20321,7 @@ "node_id": "I_kwDOCbL1IM5ml6YH", "title": "Update: HTTP_Headers_Cheat_Sheet ", "body": "## What is missing or needs to be updated?\r\n\r\nMy colleague Polčák and I have been conducting research on Network Error Logging (NEL) and its (in)security aspects. Please refer to https://arxiv.org/pdf/2305.05343.pdf (accepted for publication at SECRYPT'23).\r\n\r\nFor a quick introduction to NEL, please visit https://dcreager.net/nel/intro/.\r\n\r\nFrom an OWASP perspective, only the part of the article addressing the installation of a long-term tracker in case of a MitM position or the ability to influence content on a server portion is relevant. An example from our university: I have the ability to manipulate the content of https://www.fit.vutbr.cz/~ijerabek/, including HTTP headers. 
If I correctly send NEL-related headers, I will receive information about all visitors' movements on my website section, as well as on all other pages within the domain www.fit.vutbr.cz, potentially including subdomains.\r\n\r\nIn the article, we propose a simple prevention measure: if a web server does not send its own NEL headers, sending an NEL policy with a zero validity period will remove any previously installed policies (if they have reached the browser). In my example, this would mean that during the first visit to https://www.fit.vutbr.cz outside of /~ijerabek/, the attacker-inserted policy would be deleted, and the attacker would not receive reports of the user's further activity on this website.\r\n\r\nWe believe that it would be beneficial to add a short description of NEL header in HTTP Headers cheat sheet and add the recommendation with default zero validity policy that we came up with.\r\n\r\n## How should this be resolved?\r\n\r\nupdate: cheatsheets/HTTP_Headers_Cheat_Sheet.md\r\n\r\nAdd a short description of NEL header and add the recommendation to prevent the described attacks behavior by setting the default policy with 0 time policy.", + "summary": "The issue highlights the need to update the HTTP Headers Cheat Sheet to include information about Network Error Logging (NEL) and its security implications. It suggests adding a brief description of the NEL header along with a recommendation for a default zero validity policy to mitigate potential attacks related to NEL header manipulation. The proposed changes aim to enhance awareness and provide preventive measures against specific vulnerabilities associated with NEL. \n\nTo resolve this, the cheat sheet should be updated accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1135", @@ -19681,6 +20353,7 @@ "node_id": "I_kwDOCbL1IM5naIQp", "title": "Update: [CSRF] Improving the new Double Submit Cookie sections from #1110", "body": "## What is missing or needs to be updated?\r\nMy previous [PR](https://github.com/OWASP/CheatSheetSeries/pull/1139) to fix #1110 reintroduced an overview of the HMAC CSRF Token and added a Naive Double Submit Cookie and Encrypt Cookie sections.\r\n\r\nThe PR is merged into `main`, but there are some improvements left I believe should be addressed.\r\n\r\n## How should this be resolved?\r\n\r\n#### Improvement 1\r\nUncertain if [the pseudocode](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.md?plain=1#L101) should be marked with `code`. Maybe we should use `python` since it's close to python syntax, taking advantage of highlighting.\r\n\r\n#### Improvement 2\r\n\r\nPersonally, the new \"Signed Double Submit Cookie\" section does still not provide full mitigation against CSRF attacks, since it doesn't involve by default the idea of using session-dependent value. 
I would suggest instead of the header \"Signed Double Submit Cookie\", to use \"Session-dependent Signed Double Submit Cookie\", since a \"Session-independent Signed Double Submit Cookie\" is only a mitigation technique when combined with `Referer` validation to protect against:\r\n\r\n- Vulnerable subdomains or sibling domains\r\n- Man-in-the-middle (MITM) attack\r\n- Open redirect vulnerability\r\n\r\nActually, the \"Double Submit Cookie\" & \"Signed Double Submit Cookie\" pattern without a session-dependent value is only a defense-in-depth (DiD) technique.\r\n\r\nOnly the following two are true mitigation techniques:\r\n\r\n- Signed, session-independent, CSRF Token is **combined with** `Referer` validation. This is the CSRF mitigation technique of [Django](https://github.com/django/django/blob/9e3b327aca75b4b34abf6b00f1fd3c2cf65c8db6/django/middleware/csrf.py#L142), as they have always utilized [_a signed, session independent, Double Submit Cookie_](https://github.com/django/django/blob/e01970e9d23a241473671ea26126f8440db4dead/django/middleware/csrf.py#L142)) which by itself without `Referer` validation is vulnerable to both previously mentioned attacks.\r\n- Signed, session-dependent, CSRF Token\r\n\r\nI'm uncertain how to include easily the idea of \"session-dependent\" value without overwhelming the less experienced developers.\r\n\r\nFurthermore, the \"Naive Double Submit Cookie\" is a defense-in-depth technique and should probably be moved into the [Defense in Depth Techniques](https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#defense-in-depth-techniques) section? This would mean we have to restructure the \"Double Submit Cookie\" to introduce \"Signed Double Submit Cookie\" as the default from the beginning.\r\n\r\n#### Improvement 3\r\nThere is actually a bug I just noticed in the pseudocode. The [`request.setCookie()`](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.md?plain=1#L113) should actually be `response.setCookie()` since we set the CSRF cookie in the response.", + "summary": "The issue discusses the need for improvements to the Double Submit Cookie sections in the CSRF prevention cheat sheet. Key points include:\n\n1. **Pseudocode Formatting**: There is uncertainty about whether to format the pseudocode as `code` or `python` for better readability.\n2. **Session-Dependent Value**: The \"Signed Double Submit Cookie\" section lacks full mitigation against CSRF without incorporating a session-dependent value. The suggestion is to rename it to \"Session-dependent Signed Double Submit Cookie\" and clarify the associated vulnerabilities.\n3. **Defense-in-Depth Techniques**: The \"Naive Double Submit Cookie\" is proposed to be moved to the Defense in Depth Techniques section, requiring a restructuring of the current content.\n4. 
**Pseudocode Bug**: A bug in the pseudocode was identified where `request.setCookie()` should be corrected to `response.setCookie()`.\n\nTo address these issues, the content should be revised to incorporate session-dependent values, improve pseudocode formatting, restructure sections, and fix the identified bug.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1143", @@ -19712,6 +20385,7 @@ "node_id": "I_kwDOCbL1IM5pkXo0", "title": "Update: Session_Management_Cheat_Sheet", "body": "## What is missing or needs to be updated?\r\n\r\nThe [session management cheat sheet](https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html) lacks guidance regarding how to store and validate session token (and similar tokens) server-side:\r\n\r\n* store the session token ;\r\n* store hash of the session token ;\r\n* use a signed/MACed session token.\r\n\r\n## How should this be resolved?\r\n", + "summary": "The session management cheat sheet requires updates to include guidance on storing and validating session tokens on the server side. Specifically, it should address the following points: how to store the session token, how to store a hash of the session token, and the use of signed or MACed session tokens. To resolve this issue, these topics need to be researched and incorporated into the cheat sheet.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1153", @@ -19742,6 +20416,7 @@ "node_id": "I_kwDOCbL1IM5rLVQ1", "title": "New CS proposal: OAuth 2.0 Cheatsheet", "body": "## What is the proposed Cheat Sheet about?\r\nAs suggested by my conversations with the OWASP ASVS leaders, my proposals for ASVS requirements were too detailed (as they were taken straight from the official RFC), they suggested me to trim the ASVS requirements down but since my work on the detailed OAuth 2.0 requirements would go to waste, they suggested me to open an issue about adding it to the OWASP cheatsheets, hence this issue proposal. \r\n\r\nReference: \r\n\r\n\r\n## What security issues are commonly encountered related to this area?\r\nAuthorization and Authentication vulnerabilities in web apps, mobile apps and APIs. \r\n\r\n\r\n\r\n## What is the objective of the Cheat Sheet?\r\nTo provide a detailed information about OAuth 2.0 and its implementation. \r\n\r\n\r\n\r\n## What other resources exist in this area?\r\nOWASP ASVS \r\n\r\n\r\n\r\n\r\n", + "summary": "A proposal has been made to create a new cheat sheet focused on OAuth 2.0, following discussions with OWASP ASVS leaders. The goal is to condense detailed ASVS requirements into a more accessible format, while still preserving the valuable insights from the original, comprehensive work. This cheat sheet aims to address common security issues related to authorization and authentication in web and mobile applications, as well as APIs. It will serve as a resource for implementing OAuth 2.0 securely. 
To move forward, the project team may need to outline the key details and structure of the proposed cheat sheet.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1168", @@ -19772,6 +20447,7 @@ "node_id": "I_kwDOCbL1IM5slxwl", "title": "Update: Cross_Site_Scripting_Prevention_Cheat_Sheet.md - \"alphanumeric characters\" is not strictly defined", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\nThe [Output Encoding Summary Rules](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.md#output-encoding-rules-summary) section talks about alphanumeric characters without defining what that means. Many definitions have a caveat like \"Other characters also may be included in an alphanumeric character set\" or talk about some punctuation being included. It's also unclear whether that only includes the Latin charset or the full Unicode range. Taken strictly this could be `/[a-zA-Z0-9]/` and perhaps that's the intent as a safe baseline, but it could also be taken as `/[\\w\\d/]` which would change based on the regular expression engine and settings.\r\n\r\n## How should this be resolved?\r\n\r\nRemove or reduce uncertainty when talking about \"alphanumeric characters\" by using more concrete technical language, Unicode categories/ranges, or regular expression examples that don't depend on the regex engine.\r\n\r\n", + "summary": "The issue highlights the ambiguity surrounding the term \"alphanumeric characters\" in the Cross Site Scripting Prevention Cheat Sheet. The current description lacks a clear definition, which may lead to varying interpretations, particularly regarding the character set and whether it pertains to the Latin charset or Unicode. To address this, it is suggested to clarify the term by incorporating more precise technical language, specifying Unicode categories or ranges, or providing regular expression examples that are independent of different regex engines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1175", @@ -19802,6 +20478,7 @@ "node_id": "I_kwDOCbL1IM5spIwl", "title": "Update: JSON Web Token Cheat Sheet for Java", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\nHow difficult would it be to make this multi-language? JWT's have sort of become a web standard as an authentication/authorization primitive. It would be nice to have the common operations (parsing, validating,\r\nbest practices etc...) spelled out in various languages (like `python`, `nodejs` etc...).\r\n\r\n## How should this be resolved?\r\nEither the creation of a JSON Web Token Cheat Sheet for or generalizing the Java cheat sheet. That might be something I could contribute to. But I'm not necessarily a JWT expert.", + "summary": "The issue highlights a need for expanding the JSON Web Token (JWT) Cheat Sheet for Java to include support for multiple programming languages, such as Python and Node.js. The suggestion is to either create dedicated cheat sheets for these languages or generalize the existing Java version to cover common operations like parsing and validating JWTs. There is an openness to contributions, but it is acknowledged that expertise in JWT may be limited. 
It would be beneficial to explore how to implement this multi-language support effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1176", @@ -19832,6 +20509,7 @@ "node_id": "I_kwDOCbL1IM5w3-Hm", "title": "Update: SQL_Injection_Prevention_Cheat_Sheet - SQL Injection", "body": "\r\n\r\nhttps://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html\r\n\r\n## What is missing or needs to be updated?\r\n(https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-4-escaping-all-user-supplied-input)\r\n\r\nDefense Option1 and Defense Option 2 are not enough to prevent SQL injection . In addition to option 1 or option 2 option 4 is required.\r\n\r\nDefense Option 4: Escaping All User-Supplied Input[¶]\r\nThis technique should only be used as a last resort, when none of the above are feasible. Input validation is probably a better choice as this methodology is frail compared to other defenses and we cannot guarantee it will prevent all SQL Injections in all situations.\r\n\r\n## How should this be resolved?\r\n\r\n\r\nDefense Option 4: Escaping All User-Supplied Input[¶](https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-4-escaping-all-user-supplied-input)\r\nThis technique must be used along with Option 1 and Option 2 . Input validation is probably a better choice as this methodology is frail compared to other defenses and we cannot guarantee it will prevent all SQL Injections in all situations however it may not always be practically feasible as end user(s) often require special characters\r\n\r\nWe need to explicitly mention Option 4 is also required as part of Option 1 and Option 2.\r\n\r\n\r\n\r\n\r\nWe have followed the usage of parametrized queries or sql stored procedure and still found application open to SQL injection related attacks. Here is an example how\r\n\r\nSQL Server sample - We had also seen a similar issue with postgress also \r\n\r\n```\r\ncreate table test(name nvarchar(2000))\r\n\r\ncreate procedure proc_test (@p nvarchar(2000))\r\nas\r\nbegin\r\nupdate test\r\nset name = @p\r\nend\r\n\r\n \r\n\r\ndeclare @p1 nvarchar(2000)\r\nset @p1= ''+@@version+''\r\nexec proc_test @p1\r\n\r\nselect * from test\r\n```\r\n\r\nIn the sample above the sql version gets stored in column test can be retrieved .\r\n\r\n\r\n", + "summary": "The issue highlights concerns regarding the SQL Injection Prevention Cheat Sheet, specifically the inadequacy of Defense Options 1 and 2 in preventing SQL injection attacks. It points out that Defense Option 4, which involves escaping all user-supplied input, should be incorporated as an essential measure alongside the other two options. However, it emphasizes that this technique should be a last resort due to its frailty and the challenges it presents in practical scenarios. The issue also provides an example demonstrating how vulnerabilities can persist even when using parameterized queries or stored procedures. 
\n\nTo address the issue, it is suggested that the Cheat Sheet explicitly state that Defense Option 4 must be considered in conjunction with Options 1 and 2, while also acknowledging the limitations of escaping input as a standalone defense.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1201", @@ -19863,6 +20541,7 @@ "node_id": "I_kwDOCbL1IM52_yVr", "title": "New CS proposal: Javascript Object Signing and Encryption (JOSE)", "body": "## What is the proposed Cheat Sheet about?\r\n\r\nJavascript Object Signing and Encryption. In particular **JWE**.\r\n\r\n## What security issues are commonly encountered related to this area?\r\n\r\n- How to configure JWE implementations to be secure. \r\n- Recommended encryption algorithms\r\n- Traps e.g. using the same asymmetric keys between JWT and JWE. In what circumstances is this bad?\r\n\r\n## What is the objective of the Cheat Sheet?\r\n\r\nTo help people implement secure JWE implementations.\r\n\r\n## What other resources exist in this area?\r\n\r\nWriting this because there seems to be very little guidance online, and some of it is contradictory.\r\n\r\nThe owasp cheatsheet has some guidance on best use of JWT (object signing) but no guidance on the usage of JWE.\r\n", + "summary": "A proposal has been made for a new Cheat Sheet focused on Javascript Object Signing and Encryption (JOSE), specifically addressing JWE (JSON Web Encryption). The Cheat Sheet aims to provide guidance on configuring secure JWE implementations, recommending encryption algorithms, and highlighting common pitfalls, such as the dangers of using the same asymmetric keys for both JWT and JWE. The objective is to assist users in implementing secure JWE practices, as existing resources are scarce and often contradictory, particularly in comparison to the guidance available for JWT. To move forward, it would be beneficial to compile comprehensive best practices and recommendations for JWE usage.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1225", @@ -19893,6 +20572,7 @@ "node_id": "I_kwDOCbL1IM534Ypy", "title": "New CS proposal: Incident Response for Web Applications", "body": "\r\n\r\n## What is the proposed Cheat Sheet about?\r\nThe proposed cheat sheet, titled \"Incident Response for Web Applications,\" aims to provide a practical guide for handling security incidents related to web applications. It will cover the essential steps, communication plans, evidence preservation, and analysis required during an incident response scenario.\r\n\r\n## What security issues are commonly encountered related to this area?\r\nCommon security issues related to incident response for web applications that will be addressed in this cheat sheet include:\r\n\r\n1. **Common Web Application Threats:**\r\n - Identification and response strategies for common threats, including SQL injection, cross-site scripting, and data breaches.\r\n\r\n2. **Denial of Service Attacks:**\r\n - Guidelines for detecting and mitigating distributed denial of service (DDoS) attacks targeting web applications.\r\n\r\n3. **Unauthorized Access and Account Compromise:**\r\n - Procedures for responding to incidents involving unauthorized access, compromised accounts, and user data breaches.\r\n\r\n4. **Data Integrity and Tampering:**\r\n - Strategies for detecting and responding to incidents related to data integrity and unauthorized tampering of web application content.\r\n\r\n5. 
**Malicious File Uploads:**\r\n - Response plans for incidents involving malicious file uploads and potential exploitation of file upload vulnerabilities.\r\n\r\n## What is the objective of the Cheat Sheet?\r\nThe objective of the \"Incident Response for Web Applications\" cheat sheet is to equip web developers, security teams, and incident responders with practical guidance for effectively managing and mitigating security incidents. The cheat sheet aims to:\r\n\r\n- Provide a structured incident response plan tailored to web application security incidents.\r\n- Offer communication templates for informing stakeholders and users during security incidents.\r\n- Outline evidence preservation techniques for forensic analysis and legal purposes.\r\n- Enhance the ability to quickly identify and respond to security incidents in a web application environment.\r\n\r\n## What other resources exist in this area?\r\n- **OWASP Incident Response Guide:**\r\n - Refer to the [OWASP Incident Response Guide](https://owasp.org/www-project-incident-response/) for additional insights into incident response best practices.\r\n\r\n- **NIST Cybersecurity Framework:**\r\n - Explore the [NIST Cybersecurity Framework](https://www.nist.gov/cyberframework) for a comprehensive framework that includes incident response guidelines.\r\n\r\n- **Incident Response Blogs and Articles:**\r\n - Review blogs and articles from incident response experts and organizations for real-world incident response scenarios and lessons learned.\r\n\r\n## References\r\n- [OWASP Incident Response Guide](https://owasp.org/www-project-incident-response/)\r\n- [NIST Cybersecurity Framework](https://www.nist.gov/cyberframework)\r\n\r\n", + "summary": "The proposed cheat sheet, \"Incident Response for Web Applications,\" aims to provide a structured guide for managing security incidents specific to web applications. It will address common security issues such as SQL injection, DDoS attacks, unauthorized access, data integrity threats, and malicious file uploads. The objective is to equip developers and incident responders with practical strategies, communication templates for stakeholders, and evidence preservation techniques for effective incident management. \n\nExisting resources such as the OWASP Incident Response Guide and the NIST Cybersecurity Framework can be referenced for additional insights. To move forward, the cheat sheet needs to be developed with comprehensive content addressing the outlined objectives and security issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1235", @@ -19924,6 +20604,7 @@ "node_id": "I_kwDOCbL1IM58hD4k", "title": "Update: Secrets Management Cheat Sheet", "body": "\r\n\r\n## What is missing or needs to be updated?\r\nDiscussion of secrets management in a multi-cloud environment.\r\n\r\n## How should this be resolved?\r\nInvestigation and discussion of issues. I am sure this is an evolving field and I am learning myself. It affects more than Dev and CI/CD. References to existing art.\r\n\r\n\r\n\r\n\r\n", + "summary": "The issue highlights the need for an update to the Secrets Management Cheat Sheet, specifically focusing on the management of secrets in a multi-cloud environment. It suggests that further investigation and discussion are required to address this evolving topic, which impacts more than just development and CI/CD processes. 
To resolve this, it may be beneficial to gather insights and references to existing literature on the subject.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1289", @@ -19956,6 +20637,7 @@ "node_id": "I_kwDOCbL1IM59nBQb", "title": "Shortlinks for cheatsheets", "body": "Does the cheatsheet site have a mechanism for shorter links, e.g.\r\n\r\nhttps://cheatsheetseries.owasp.org/SSRF\r\n\r\nwould redirect to:\r\n\r\nhttps://cheatsheetseries.owasp.org/cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html\r\n\r\n", + "summary": "The issue raises a question about the availability of shortened links for cheatsheets on the website. It suggests implementing a mechanism that would allow a shorter URL to redirect to the full cheatsheet link. Action could be taken to explore or create a URL shortening feature for improved accessibility.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1300", @@ -19986,6 +20668,7 @@ "node_id": "I_kwDOCbL1IM5-CuU6", "title": "New CS proposal: GitHub Actions", "body": "## What is the proposed Cheat Sheet about?\r\n\r\nIt will aim to provide guidance on configuring and utilising GitHub Actions securely.\r\n\r\n## What security issues are commonly encountered related to this area?\r\n\r\nGithub Actions security is needed to prevent supply chain attacks. GitHub Action injection attacks are also common, which can result in unauthorised code execution, modifying release packages, disclosure of secrets, etc.\r\n\r\n## What is the objective of the Cheat Sheet?\r\n\r\nIt will provide a comprehensive guide for developers and security practitioners with best practices and considerations for securing GitHub Actions workflows.\r\n\r\n## What other resources exist in this area?\r\n\r\nThere is an [official hardening guide](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions) from Github. Even though it provides lots of guidance, there might be some additional things like whitelisting actions, using custom deployment protection rules, not using pull_request_target, etc. Also, to enable community support, it will be good to have an open-source guideline so that additional things can be added by the community.", + "summary": "A proposal is being made to create a Cheat Sheet focused on securely configuring and utilizing GitHub Actions. The Cheat Sheet aims to address common security issues, such as supply chain attacks and GitHub Action injection attacks, which can lead to unauthorized code execution and other vulnerabilities. Its objective is to serve as a comprehensive guide for developers and security professionals, outlining best practices for securing workflows. Existing resources include an official hardening guide from GitHub, but there is room for additional community-driven guidelines, particularly regarding whitelisting actions and deployment protection rules. It may be beneficial to compile these insights into a collaborative document for broader support and enhancement.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1306", @@ -20017,6 +20700,7 @@ "node_id": "I_kwDOCbL1IM5-btVE", "title": "Update: LDAP Injection Prevention Cheat Sheet", "body": "\r\n\r\nIntroduction\r\n\r\n## What is missing or needs to be updated?\r\n\r\n\r\nLINQtoLDAP\r\nthis project seems to be outdated. 
Responding to .NET-Standard 2.0 an 3.5 which are outdated too.\r\n\r\n## How should this be resolved?\r\n\r\n\r\nPlease check, if something like LINQtoLDAP is still necessary...\r\n\r\n\r\n############################################################################################\r\n\r\n\r\nDefense Option 2: Use Frameworks that Automatically Protect from LDAP Injection\r\n\r\n## What is missing or needs to be updated?\r\n\r\n\r\nLink to https://linqtoad.codeplex.com/ is outdated, as codeplex is offline since 2017.\r\nLINQ to Active Directory is deleted.\r\nThis project has been deleted.\r\nref. https://openhub.net/p/LINQtoAD\r\n\r\n## How should this be resolved?\r\n\r\n\r\nPlease check, if something like LINQtoAD is still necessary...\r\n\r\n\r\n############################################################################################\r\nPlease check, if this CheatSheet is still necessary\r\n", + "summary": "The issue highlights the need for updates to the LDAP Injection Prevention Cheat Sheet, specifically regarding outdated references and projects such as LINQtoLDAP and LINQ to Active Directory. The links provided are no longer valid, and the mentioned projects have been deleted or are considered outdated. \n\nTo resolve this, it is suggested to evaluate the relevance of LINQtoLDAP and similar frameworks in the context of current standards and practices. A thorough review of the Cheat Sheet is needed to ensure all information is accurate and up-to-date.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1313", @@ -20047,6 +20731,7 @@ "node_id": "I_kwDOCbL1IM5-lepz", "title": "Update: Credential_Stuffing_Prevention_Cheat_Sheet", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\nA couple of suggestions for the Credential Stuffing cheat sheet:\r\n1. MFA section should link to MFA cheat sheet (reciprocating the link to cred stuffing from MFA)\r\n2. With 2023 expansion in support for FIDO2 passkeys, the line that MFA may not be practical should be replaced with suggestion of passkeys to prevent cred stuffing.\r\n\r\n## How should this be resolved?\r\n\r\n\r\nChanges suggested inline above. Could also mention FIDO UAF or U2F device bound software or hardware passkeys as well- not sure if this is getting too far into the weeds for a cheat sheet?\r\n\r\n\r\n\r\n", + "summary": "The issue highlights the need for updates to the Credential Stuffing Prevention Cheat Sheet. Specifically, it suggests adding a reciprocal link to the MFA cheat sheet in the MFA section and replacing a statement about MFA practicality with a recommendation for using FIDO2 passkeys, given the recent expansion in their support. Additionally, there is a consideration for mentioning FIDO UAF or U2F device-bound software or hardware passkeys, though there is some uncertainty about the appropriateness of this detail for a cheat sheet format. 
To resolve this, the suggested changes should be implemented.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1315", @@ -20078,6 +20763,7 @@ "node_id": "I_kwDOCbL1IM5-7IXL", "title": "Update: Secure Product Design", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\n\r\n- Requires updates to account for current best practices\r\n- https://github.com/OWASP/CheatSheetSeries/issues/1114 \r\n\r\n\r\n## How should this be resolved?\r\n\r\n\r\n- I propose changing the current cheat sheet to align with the [Shifting the Balance of Cybersecurity Risk: Principles and Approaches for Secure by Design Software](https://www.cisa.gov/resources-tools/resources/secure-by-design) publication.\r\n- The intended final result would follow the broad structure of the publication, but in a more palatable and actionable form, along with links to existing cheat sheets for brevity.\r\n\r\nWould like open up discussion on whether it would make sense to rename the cheat sheet to `Secure By Design Cheat Sheet`\r\n\r\n", + "summary": "The issue highlights the need for updates to the Secure Product Design Cheat Sheet to align with current best practices and the guidelines outlined in the \"Shifting the Balance of Cybersecurity Risk: Principles and Approaches for Secure by Design Software\" publication. It suggests restructuring the cheat sheet for better clarity and actionability while incorporating links to existing resources. Additionally, there is a proposal to consider renaming the cheat sheet to \"Secure By Design Cheat Sheet.\" Discussion on these changes is encouraged.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1319", @@ -20109,6 +20795,7 @@ "node_id": "I_kwDOCbL1IM5-7Mvo", "title": "Update: User Privacy Protection Cheat Sheet", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\n- No mention of [OWASP Top 10 Privacy Risks & Countermeasures 2.0](https://owasp.org/www-project-top-10-privacy-risks/OWASP_Top_10_Privacy_Risks_Countermeasures_v2.0.pdf)\r\n\r\n\r\n## How should this be resolved?\r\n\r\n- Add background covering [OECD Privacy Guidelines](https://legalinstruments.oecd.org/en/instruments/OECD-LEGAL-0188) and [Privacy by Design Principles](https://iapp.org/resources/article/oipc-privacy-by-design-resources/)\r\n- Add OWASP Top 10 Privacy Risks as noted above\r\n- Add Privacy-enhancing technologies, mostly focusing on those frequently in use nowadays such as data masking techniques, maybe touch on ZKPs out of emerging\r\n\r\n\r\n\r\n", + "summary": "The current User Privacy Protection Cheat Sheet is missing key references and guidelines, specifically the OWASP Top 10 Privacy Risks & Countermeasures 2.0. To improve the Cheat Sheet, it is suggested to include an overview of the OECD Privacy Guidelines and the Privacy by Design Principles. 
Additionally, incorporating the OWASP Top 10 Privacy Risks and discussing contemporary privacy-enhancing technologies, such as data masking techniques and possibly zero-knowledge proofs, would be beneficial.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1320", @@ -20140,6 +20827,7 @@ "node_id": "I_kwDOCbL1IM5_GNEG", "title": "Update: DotNet Security Cheat Sheet", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\n\r\nChapter \"A01 Broken Access Control\"\r\n\r\n> The following requirement is included in Chapter A01:\r\n> \"Reduce the time period a session can be stolen in by reducing session timeout and removing sliding expiration\":\r\n```\r\nExpireTimeSpan = TimeSpan.FromMinutes(60),\r\nSlidingExpiration = false\r\n```\r\nThe following example is like this (line 32/33):\r\n```\r\n\t\t\t\tExpireTimeSpan = TimeSpan.FromMinutes(30),\r\n\t\t\t\tSlidingExpiration = true,\r\n```\r\nI don't think it's a bad idea using SlidingExploration - if it is implemented safely, especially using a short deadline, do you?\r\n\r\n## How should this be resolved?\r\n\r\n\r\nCan you please revise this article and form a consistent line?\r\n\r\n\r\n", + "summary": "The issue highlights a discrepancy in the \"A01 Broken Access Control\" section of the DotNet Security Cheat Sheet regarding session management. It questions the recommendation to avoid using sliding expiration for sessions, suggesting that it can be safe if implemented correctly with a short timeout. The request is to revise the article to present a consistent stance on session expiration and sliding expiration practices. \n\nTo resolve this, a review and update of the relevant sections in the Cheat Sheet is needed to align the guidance on session timeout and sliding expiration.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1325", @@ -20170,6 +20858,7 @@ "node_id": "I_kwDOCbL1IM6Ba9M8", "title": "Update: [XML External Entity Prevention Cheat Sheet]", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\n\r\nThe chapter on .net refers to version 4.5 - nobody should really be using that anymore. It contains references to dotnet_security_unit_testing - this project was created over 7 years ago. Is this still up to date or would it not be better to exclude it in order to avoid a false sense of security?\r\n\r\nThe chapter on iOS is \"up to date\" with iOS 6 from 2012\r\n\r\n## How should this be resolved?\r\n\r\n\r\nCan you please check whether some of the content is now obsolete?\r\nAs I am neither a specialist for .net nor for iOS, this should not be my cup of tea...\r\n\r\n\r\n", + "summary": "The XML External Entity Prevention Cheat Sheet requires updates in several areas. The .net section references an outdated version (4.5) and includes information about a project that is over seven years old, which may not be relevant anymore. Additionally, the iOS section is based on iOS 6 from 2012, making it significantly outdated. It is suggested to verify the current relevance of the content and consider excluding outdated references to ensure users are not misled. 
A review of the sections related to .net and iOS is necessary to address these concerns.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1354", @@ -20200,6 +20889,7 @@ "node_id": "I_kwDOCbL1IM6Bx1B3", "title": "New CS proposal: Software Supply Chain Security", "body": "\r\n\r\n## What is the proposed Cheat Sheet about?\r\n\r\nThe CS will provide an on overview of SSCS, its relevance to developers, and practical guidance on improving the security of SSCs.\r\n\r\n## What security issues are commonly encountered related to this area?\r\n\r\n\r\n- Known vulnerable components used to build software\r\n- Using compromised or insecure third-party services or tools to develop, build, deliver, or otherwise manage software (which may not necessarily be \"built\" into the software as in the above)\r\n- Compromise of build script or processes\r\n- Compromise of code repositories or packages\r\n- Compromise of deployment processes or runtime environment (such as pulling a malicious update)\r\n\r\n\r\n## What is the objective of the Cheat Sheet?\r\n\r\nThe main objectives of the cheatsheet are: (1) provide an understanding of the various components which comprise the SSC, (2) identify common threats to the SSC, and (3) provide practical guidance on how developers can mitigate SSC risk.\r\n\r\n\r\n## What other resources exist in this area?\r\n\r\n\r\n\r\n\r\n- [CISA: Defending Against Software Supply Chain Attacks](https://www.cisa.gov/resources-tools/resources/defending-against-software-supply-chain-attacks-0)\r\n- [NIST SP 800-161, Rev 1](https://csrc.nist.gov/pubs/sp/800/161/r1/final)\r\n- [NIST SP 800-218](https://csrc.nist.gov/pubs/sp/800/218/final)\r\n- OWASP ASVS V10\r\n- [SAFECode: Managing Managing Security Risks Inherent in the Use of Third-party Components](https://safecode.org/wp-content/uploads/2017/05/SAFECode_TPC_Whitepaper.pdf)\r\n- [SLSA](https://slsa.dev/spec/v1.0/)\r\n\r\n\r\n", + "summary": "A proposal for a Cheat Sheet on Software Supply Chain Security (SSCS) is being developed. This Cheat Sheet aims to provide an overview of SSCS, its significance for developers, and practical advice for enhancing the security of software supply chains. \n\nCommon security issues highlighted include the use of known vulnerable components, reliance on compromised third-party services, and threats to build scripts, code repositories, and deployment processes. \n\nThe Cheat Sheet's objectives are to educate readers about the components of the SSC, identify prevalent threats, and offer practical mitigation strategies for developers. \n\nExisting resources on the topic include guidelines from CISA, NIST, OWASP ASVS, and SAFECode, among others. \n\nNext steps may involve further outlining the content and organizing the information to effectively address the outlined objectives and issues.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1356", @@ -20231,6 +20921,7 @@ "node_id": "I_kwDOCbL1IM6E_oVq", "title": "Update: LDAP_Injection_Prevention_Cheat_Sheet", "body": "## What is missing or needs to be updated?\r\n\r\n\r\nAfter a casual reading, the Cheat Sheet seems to suggests that even passwords should always be escaped. 
When this is taken at face value, it can cause all kinds of issues, mainly stuff not working as expected any more (see https://github.com/dexidp/dex/issues/3433).\r\n\r\nThe `Safe Java Escaping Example` wrongly assumes that a password check against LDAP is done via LDAP filter and escapes the password, when actually password checking against an ldap server is (or at least should be) exclusively done via BIND request which safely accepts the password in unfiltered plaintext. \r\n\r\n## How should this be resolved?\r\n\r\nThe usual methods for password authentication against LDAP should be mentioned:\r\n* Single-Step Auth:\r\n - BIND-Request with DN constructed from template like `uid=${USERID},ou=people,dc=example,dc=com`. Whatever is inserted in that template should be escaped as needed. Password from user input is given to BIND call as is.\r\n* Three-Step-Auth:\r\n - BIND with credentials limited to searching - these would be configured in the app.\r\n - `SEARCH` LDAP for users DN like `(&(uid=${USERID})(deactivated=0))` or something. Again, anything inserted into that query should be escaped as needed\r\n - BIND with DN from search result above, if any. Password from user input is given to BIND call as is.\r\n \r\nThese two both assume the LDAP server to require (non-anonymous) BIND before allowing access to user data, which I think is (or should be) default or at least best practice. So `Enabling Bind Authentication` should be moved up in the document's focus, i.e. \"To prevent LDAP Injection, first the LDAP server needs to be configured properly...\". Even if it's out of scope of this document, proper safety precautions should be mentioned - \"Simple BIND transmits plain text passwords to the server, so it should only be used with an encrypted connection\" or \"If you **really** need to use unencrypted connection, make sure to only use SASL BIND with challenge protocols etc. to prevent password sniffing.\"\r\n\r\nThe `Safe Java Escaping Example` should be updated to reflect either one of the above methods and no longer use the password in a search filter.", + "summary": "The issue highlights concerns regarding the LDAP Injection Prevention Cheat Sheet, specifically the misleading suggestion that passwords should always be escaped. This can lead to functionality issues, as password checks against LDAP servers typically use BIND requests that accept plaintext passwords safely. \n\nTo resolve this, the Cheat Sheet should include proper methods for password authentication against LDAP, such as outlining both Single-Step and Three-Step authentication processes. It should emphasize the importance of enabling BIND authentication as a key security measure and include safety precautions regarding the use of plain text passwords. Additionally, the `Safe Java Escaping Example` needs to be revised to align with these methods and avoid using passwords in search filters.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1372", @@ -20261,6 +20952,7 @@ "node_id": "I_kwDOCbL1IM6KCpke", "title": "New CS proposal: Drone Security", "body": "\r\n\r\n## What is the proposed Cheat Sheet about?\r\n\r\nThis drone security Cheat Sheet aims to ensure the safe and secure operation of unmanned aerial vehicles (UAVs) in various mobile, web and cloud applications. 
\r\n\r\n## What security issues are commonly encountered related to this area?\r\n\r\nInsecure Communication Links, data transmitted can be intercepted\r\nWeak Authentication Mechanisms, default or weak passwords can allow unauthorized access\r\nFirmware Vulnerabilities, unencrypted firmware and vulnerable bootloaders can lead to unauthorized modifications\r\nInsufficient Physical Security, need to secure physical access to USB ports and other interfaces to prevent data theft or tampering \r\nInsecure Supply Chain, compromised components from suppliers can introduce hidden vulnerabilities\r\nUnsecured Third Party Components, third-party software libraries and components can compromise drone security\r\nInadequate Logging and Monitoring, insufficient monitoring of drone operations can delay the detection of security breaches or operational anomalies\r\nInsecure Data Storage, sensitive data stored on drones can be accessed if not encrypted\r\nSpoofing and Replay Attacks, GPS or ADS-B data spoofing or command replay attacks could mislead or take control of the drone\r\nRF Interference and Jamming, drones can be disrupted or controlled through intentional RF interference or jamming\r\nSensor Vulnerabilities, cameras, GPS and other sensors can be exploited to feed incorrect data to the drone systems.\r\nCloud Storage and Data Management Vulnerabilities, inadequate security controls for drone data stored in the cloud (e.g., videos, logs, images) can lead to unauthorized access and data breaches\r\nEnd of Life Decommissioning Risks, inadequately secured decommissioning processes can leave residual data accessible, or hardware could be reused maliciously\r\nInteroperability and Integration Issues, integrating various systems and technologies without a cohesive security strategy can introduce vulnerabilities, e.g. web servers on cameras\r\nThird Party Services and API Security, external APIs used by drones or GCS might be insecure, providing a pathway for attacks\r\nUser Error and Misconfiguration, incorrect configuration of drone systems by users can expose them to risks of unauthorized access or malfunction\r\n\r\n## What is the objective of the Cheat Sheet?\r\n\r\nTo provide developers working on mobile apps, websites, cloud systems and firmware for drones to understand the wide ranging risks.\r\n\r\n## What other resources exist in this area?\r\n\r\nhttps://dronewolf.darkwolf.io/intro\r\nhttps://github.com/nicholasaleks/Damn-Vulnerable-Drone\r\nhttps://github.com/dhondta/dronesploit\r\nhttps://github.com/jezzab/DUMLdore\r\n\r\n\r\n\r\n\r\n", + "summary": "The proposed Cheat Sheet focuses on Drone Security, aiming to promote the safe operation of unmanned aerial vehicles (UAVs) across various platforms. It will address common security issues such as insecure communication links, weak authentication mechanisms, firmware vulnerabilities, and insufficient physical security, among others. The objective is to equip developers with an understanding of the risks associated with mobile apps, websites, cloud systems, and firmware for drones. \n\nIn terms of next steps, compiling detailed guidelines and best practices to mitigate the identified vulnerabilities would be beneficial. 
Additionally, referencing existing resources on drone security could enhance the Cheat Sheet's effectiveness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1412", @@ -20292,6 +20984,7 @@ "node_id": "I_kwDOCbL1IM6RO6vo", "title": "Update: JSON_Web_Token_for_Java_Cheat_Sheet", "body": "## What is missing or needs to be updated?\r\nThe [Token Storage on Client Side](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.md#token-storage-on-client-side) page recommends using Session Storage over Local Storage, seemingly because Local Storage persists between browser restarts (accurate), however it makes no mention of the fact that Session Storage is implemented per-tab and not per-site. That is, if example.com adds some item to Session Storage, and a user then opens a new tab through any method other than \"duplicate tab\" (e.g. right-click -> open link in new tab), the new tab will have a completely new (and empty) Session Storage instance, even if the user goes to example.com.\r\n\r\nSince opening things in multiple tabs is pretty common user behavior, it is not realistic (IMO) for the recommended method to be implemented by developers.\r\n\r\n## How should this be resolved?\r\nHonestly, I think the recommendation should be to use Local Storage but with tokens that have short expiration times. There's no perfect solution, but Session Storage doesn't work at all so it's surprising to see it being recommended.", + "summary": "The issue discusses the recommendation of using Session Storage over Local Storage for token storage on the client side in the JSON Web Token for Java Cheat Sheet. It highlights a significant limitation of Session Storage, which is that it is implemented per tab rather than per site, leading to potential confusion for users who open multiple tabs. The author suggests that the recommendation should be revised to favor Local Storage with tokens that have short expiration times, as Session Storage does not adequately meet the needs of typical user behavior. \n\nTo address this, the recommendation in the cheat sheet should be updated to reflect these concerns and propose a more suitable solution for token storage.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1458", @@ -20322,6 +21015,7 @@ "node_id": "I_kwDOCbL1IM6Vh3Gd", "title": "Update: Session Management Cheat Sheet - Broken \"Session Fixation\" Black Hat Resource Links", "body": "\r\n\r\n## What is missing or needs to be updated?\r\nThe two links included under the [Transport Layer Security](https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html#transport-layer-security) section of the _Session Management Cheat Sheet_ (which presumably provide examples and/or more detailed explanation) regarding Session Fixation attacks are broken. They're suppose to link to some Black Hat EU presentation slides. 
Instead when you open either of them, you receive an \"Access Denied\" response from media.blackhat.com.\r\n\r\n> The usage of an encrypted communication channel also protects the session against some session fixation attacks where the attacker is able to intercept and manipulate the web traffic to inject (or fix) the session ID on the victim's web browser (see [here](https://media.blackhat.com/bh-eu-11/Raul_Siles/BlackHat_EU_2011_Siles_SAP_Session-Slides.pdf) and [here](https://media.blackhat.com/bh-eu-11/Raul_Siles/BlackHat_EU_2011_Siles_SAP_Session-WP.pdf)).\r\n\r\n## How should this be resolved?\r\nI see two options\r\n1. The links could be removed entirely. \r\n2. The links could be replaced with an updated permalink to the intended slides or with alternative, but equivalent resource references.\r\n\r\nPersonally, I think Option 2 is more beneficial, especially as someone who was hoping to use the linked resources to better understand Session (Cookie) Fixation attacks.\r\n", + "summary": "The issue reports that two links in the \"Transport Layer Security\" section of the Session Management Cheat Sheet related to Session Fixation attacks are broken, leading to an \"Access Denied\" response. The suggestions for resolution include either removing the links or replacing them with updated links or alternative resources. The preference leans towards the second option, as it would provide users with valuable information on Session Fixation attacks. The necessary action involves either sourcing new links or finding equivalent resources to include in the cheat sheet.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1481", @@ -20353,6 +21047,7 @@ "node_id": "I_kwDOCbL1IM6VrEwf", "title": "Update: Threat_Modeling_Cheat_Sheet", "body": "\r\n\r\n## What is missing or needs to be updated?\r\n\r\nAdding reference tools and training\r\n\r\n\r\n## How should this be resolved?\r\n\r\n\r\nAdd a tool: Threat composer \r\n[Demo](https://awslabs.github.io/threat-composer), [Repo](https://github.com/awslabs/threat-composer/)\r\n\r\nAdd AWS' free online training: \r\nThreat modeling for builders, available on [AWS Workshop Studio](https://catalog.workshops.aws/threatmodel/en-US), and [AWS SkillBuilder](https://explore.skillbuilder.aws/learn/course/external/view/elearning/13274/threat-modeling-for-builders-workshop)\r\n\r\n\r\n", + "summary": "The issue highlights the need to update the Threat Modeling Cheat Sheet by including reference tools and training resources. Specifically, it suggests adding the tool \"Threat Composer\" along with links to its demo and repository. Additionally, it recommends including AWS's free online training titled \"Threat Modeling for Builders,\" available on AWS Workshop Studio and AWS SkillBuilder. To resolve this, the Cheat Sheet should be updated to incorporate these resources.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/CheatSheetSeries/issues/1482", @@ -20380,10 +21075,11 @@ "pk": 712, "fields": { "nest_created_at": "2024-09-11T21:11:53.800Z", - "nest_updated_at": "2024-09-11T21:11:53.800Z", + "nest_updated_at": "2024-09-13T16:11:31.302Z", "node_id": "I_kwDOCbLy9s51Pldf", "title": "Error Deploying DVSA", "body": "When trying to deploy DVSA I encountered an error with the S3 Bucket Permissions, I kept getting 403s, I believe I have fixed the issue by fixing the references to the S3 buckets. 
I believe since the buckets referenced in the policies were done by a function there was a race condition where the permission policy would attempt to be created before the S3 bucket was finished creating.\r\n\"Untitled\"\r\n\r\n I swapped the reference to the actual buckets in the Cloudformation template and it appears to be working now. \r\n \r\n \r\n![Capture](https://github.com/OWASP/DVSA/assets/2498490/a8d758d2-7d3f-451b-a28f-b113a3641c24)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/DVSA/issues/49", @@ -20410,6 +21106,7 @@ "node_id": "MDU6SXNzdWU0NDg5NDIxOTg=", "title": "Support other FaaS providers?", "body": "Are there plans to support other providers like KNative or Azure functions? I prefer to be able to run this locally without deploying on a cloud, both KNative and Azure functions support that.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Goat/issues/6", @@ -20436,6 +21133,7 @@ "node_id": "MDU6SXNzdWU0ODUwMTQ2NDE=", "title": "AWS Serverless Application Repository is", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Goat/issues/8", @@ -20462,6 +21160,7 @@ "node_id": "MDU6SXNzdWU1MDAxMjQzNjI=", "title": "Do we have the Java, Python or C# version for Serverless-Goat", "body": "Do we have the Java, Python or C# version for Serverless-Goat? How to get these versions?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Goat/issues/11", @@ -20488,6 +21187,7 @@ "node_id": "MDU6SXNzdWU1MDU2NDQyODI=", "title": "The URL is wrong when I click the convert button", "body": "I click the convert button, I get the error message \"{message: Forbidden}\"\r\n\r\nI check the URL, I found the URL is not correct. The Submit URL is missing the stage.\r\n\r\nThe code in the index.html:\r\n\r\n Enter a URL of a Word Doc (.doc) file to convert:\r\n \r\n \r\n\r\n\r\n\r\nThe frondend URL:\r\nhttps://****.execute-api.us-east-1.amazonaws.com/Prod\r\nThe Submit URL:\r\nhttps://***.execute-api.us-east-1.amazonaws.com/api/convert?document_url=https%3A%2F%2Fwww.puresec.io%2Fhubfs%2Fdocument.doc\r\n\r\nThe correct URL:\r\nhttps://***.execute-api.us-east-1.amazonaws.com/**Prod**/api/convert?document_url=https%3A%2F%2Fwww.puresec.io%2Fhubfs%2Fdocument.doc", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Goat/issues/13", @@ -20514,6 +21214,7 @@ "node_id": "MDU6SXNzdWU1Mzc1MTU1NDI=", "title": "Aws node8 depreciation ", "body": "Hi. Will serverless goat be affected by AWS deprecating node8.10 in lambda?\r\nI think deprecation will start in jan 2020", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Goat/issues/16", @@ -20540,6 +21241,7 @@ "node_id": "MDU6SXNzdWU5Mzk3NzU0MDk=", "title": "message forbidden error", "body": "trying to learn the command injection attacks in serverless goat, but I am facing a message forbidden error. 
can any help me through", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Goat/issues/20", @@ -20562,10 +21264,11 @@ "pk": 719, "fields": { "nest_created_at": "2024-09-11T21:12:03.312Z", - "nest_updated_at": "2024-09-11T21:12:03.312Z", + "nest_updated_at": "2024-09-13T16:11:34.823Z", "node_id": "I_kwDOCapMk85JcUou", "title": "Application not found.", "body": "I tried visiting [AWS Serverless Application Repository](https://serverlessrepo.aws.amazon.com/applications/arn:aws:serverlessrepo:us-east-1:761130837472:applications~serverless-goat) from the README.md but was given the following error. Was the application migrated?\r\n\r\n---\r\nThe application you were looking for could not be found. [Return to the applications search page](https://serverlessrepo.aws.amazon.com/applications).\r\n\r\n---", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Goat/issues/21", @@ -20588,10 +21291,11 @@ "pk": 720, "fields": { "nest_created_at": "2024-09-11T21:12:11.637Z", - "nest_updated_at": "2024-09-11T21:12:11.637Z", + "nest_updated_at": "2024-09-13T16:11:41.543Z", "node_id": "MDU6SXNzdWUzODY1MTgxMTc=", "title": "What is purpose of this repo?", "body": "Saw this new repo and that it's active.\r\n\r\nWhat is its overall purpose? \r\n\r\nJust curious as it looked interesting. Thanks.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Cloud-Security-Mentor/issues/1", @@ -20614,10 +21318,11 @@ "pk": 721, "fields": { "nest_created_at": "2024-09-11T21:12:20.047Z", - "nest_updated_at": "2024-09-11T21:12:20.047Z", + "nest_updated_at": "2024-09-13T16:11:48.201Z", "node_id": "MDU6SXNzdWUzNjY3OTE0NzU=", "title": "Add images to owasp-chapters open letter", "body": "page is now here https://github.com/OWASP/open-letters-to-owasp/blob/master/from-owasp-chapters/04-Oct-2018.md\r\n\r\nImages and original letter is here https://docs.google.com/document/d/14ggTe7yx4ddDanNKDuwnvotI9Dz9v4j-5P-27gboEIs/edit", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/open-letters-to-owasp/issues/1", @@ -20644,6 +21349,7 @@ "node_id": "MDU6SXNzdWU0NTkzNTgwNTY=", "title": "Business Justification", "body": "Hi Team,\r\nMy Manger has asked me to draft a document justifying sedated in our business \r\n\r\nI provided him this \r\n-Sedated is trigger by GitHub hook when code is committed to the repository it checks for sensitive information and rejects the commit if found this protects sensitive information from being viewed by unauthorized users.\r\n\r\n\r\nBut he would like more information.\r\nI am not real familiar with Sedated \r\nDoes any one have a resource or reference to help me out \r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/7", @@ -20670,6 +21376,7 @@ "node_id": "MDU6SXNzdWU0NzIwMjk3NzE=", "title": "Love what your doing, here's more regex", "body": "I love what you guys are doing on owasp sedated. if you are looking to increase the number of regex findings that you have you might also look at the gitrob tool\r\nhttps://github.com/michenriksen/gitrob/blob/master/core/signatures.go", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/10", @@ -20696,6 +21403,7 @@ "node_id": "MDU6SXNzdWU1MDcxNjQ2NTM=", "title": "Whitelisting keywords or specific lines", "body": "Hi @SimeonCloutier & team\r\n\r\nThanks for the great product. 
We are using SEDATED in our organization. \r\n\r\nWe have some customization's on whitelisting some specific keywords or skipping lines of code for scanning for false positives. Is any specific work going on here or do you have any suggestions on this ?\r\n\r\nThanks.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/11", @@ -20722,6 +21430,7 @@ "node_id": "MDU6SXNzdWU1OTc1ODU1NzA=", "title": "BUG: Github UI always has a different commitId - not allowing for False Positive Processes", "body": "This issue is each commit within the UI has a new commitId attached to it so there is no way for a user to report the false positive and continue on if they are using a frontend workflow.\r\n\r\nSteps to reproduce\r\n- User starts by editing a file within Github UI\r\n- User adds a secret (a false positive) within the UI and attempts to commit to master - doing so provided push rejected feedback to the user with a commit Id which is to be reported as a false positive via the commit whitelist file\r\n- User adds the provided commitId from the feedback in the whitelist file\r\n- User again attempts to commit to master within the UI - user will again receive push rejected feedback\r\n\r\nThe only workaround we could think of was whitelisting the whole repo making it never be scanned until it was removed from that whitelist.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/15", @@ -20748,6 +21457,7 @@ "node_id": "MDU6SXNzdWU2MDQyMTE0MTI=", "title": "5sec Timeout - Performance Issue", "body": "Hi Team, \r\n\r\nWe have implemented SEDATED in our Organization and it's been very successful. Thank you for this. One challenge we are facing is on the performance, many times, even with medium sized changes in the push or when we push the changes to Master branch, we easily end up with 5sec rule timeout. For now, we are whitelisting the repo till the push is complete and revert it. \r\n\r\nAny recommendations or better work around to deal with this issue, as the current process is slowing down or blocking the development process even when there is no sensitive data in the code. \r\n\r\nThank you\r\nSagar", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/16", @@ -20774,6 +21484,7 @@ "node_id": "MDU6SXNzdWU2NDMyMzcxNzg=", "title": "Repo and commit id whitelisting stopped working", "body": "Hi @SimeonCloutier @denniskennedy and Team,\r\n\r\nI quickly wanted to reach out to your guys and seek suggestions. We are using sedated extensively in our organization and we started facing issue from last Friday, where repo and commit id whitelisting stopped working. I had about 850 commit id's and 50 repos been whitelisted, but now new commit id's or repo's whitelisting does not take effect. As a pre-receive hook, its working fine but when something is blocked, unable to whitelist to move forward. \r\n\r\nNote: We use whitelisting repo to get around the 5sec rule issue. When teams try to push a big change, they usually get blocked. \r\n\r\nCan you help me troubleshoot this issue further. Thank you,\r\n\r\nSagar", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/18", @@ -20800,6 +21511,7 @@ "node_id": "MDU6SXNzdWU2NDMzOTE1OTA=", "title": "Using SEDATED to scan git history", "body": "I just discovered this tool and it looks fantastic! 
I am wondering how difficult it would be to pipe data from `git` to SEDATED to spot **existing** commits that have matches before implementing it as a pre-commit hook. Does this seem feasible?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/19", @@ -20826,6 +21538,7 @@ "node_id": "MDU6SXNzdWU2NjM1MzcwMjQ=", "title": "Why no use GitLeaks?", "body": "As a suggestion, instead of reinvent the wheel and create rules and regex from scratch, why no use Gitleaks as part of the project and inherit all their capabilities and builtin rules?\r\n\r\nhttps://github.com/zricethezav/gitleaks\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/21", @@ -20852,6 +21565,7 @@ "node_id": "MDU6SXNzdWU2OTg0OTA1NDc=", "title": "Reverting a PR - Github Enterprise responds with a 500 when changes include a regex match", "body": "A team was attempting to revert a PR via the github UI. Within the PR there was a removal of 2 lines that matched regexes (in this case they happened to be test secrets). When the team attempted the revert in the PR they get an error page with Github Enterprise responding with a 500 response. The error didn't provide any feedback from the SEDATED script but upon reaching out to Github Support we were able to retrieve the SEDATED feedback to see what was happening. \r\n\r\nThe failure reason makes sense, code that includes a regex match is attempting to be added back to the source code. But the team didn't receive any helpful information to resolve the issue. \r\n\r\nHas anyone else run into this? Is there an expected workaround in this case?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/22", @@ -20874,10 +21588,11 @@ "pk": 731, "fields": { "nest_created_at": "2024-09-11T21:12:32.146Z", - "nest_updated_at": "2024-09-11T21:12:32.146Z", + "nest_updated_at": "2024-09-13T16:11:51.775Z", "node_id": "MDU6SXNzdWU4OTk4MzUzNTY=", "title": "Regex Improvement: False Positive while using a series of asterisks", "body": "Here are some false positive examples:\r\n- `ssl.truststore.password=*********`\r\n- `password=*********`\r\n- `password:*********`\r\n- `pass=********`\r\n- `secret=*********`\r\n- `secret:*********`\r\n\r\nFor now of course we've just recommended utilizing an all text fake password but it'd be nice if these weren't flagged.\r\n\r\nSomehow these could be baked into the regexes but we haven't done work for that yet. If they were able to be included and no longer blocked it'd be important the `*` is still recognized as a special character.\r\n\r\n\r\nIt seems like this would be another good candidate for some type of regex acceptlist as mentioned [here](https://github.com/OWASP/SEDATED/issues/11) in order to not introduce more complexity into the regexes as well as handle false positives in the future.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SEDATED/issues/25", @@ -20904,6 +21619,7 @@ "node_id": "MDU6SXNzdWUzOTM2MjQ3MDQ=", "title": "D04 - Secure Defaults", "body": "Am going to slowly add to this issue, and then eventually merge into repo:\r\n\r\n1. `apt install apt-transport-https`\r\n\r\nI think it's important to use TLS for all package installations. You'll have to have to separate install statements, which can cause slight increase in image size but is worth it.\r\n\r\n2. 
`apt-get --no-install-recommends -y install libtool`\r\n\r\nreduce surface area of attack by not installing extra packages during installation of packages too.", + "summary": "The issue discusses the need for secure defaults in package installations. It suggests installing `apt-transport-https` to ensure TLS is used for package installations, despite the potential increase in image size due to separate install statements. Additionally, it recommends using `apt-get --no-install-recommends` to install `libtool`, thereby minimizing the surface area for attacks by avoiding unnecessary extra packages. Action items include implementing these installation practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/2", @@ -20930,6 +21646,7 @@ "node_id": "MDU6SXNzdWU0MjI4MzEwNzY=", "title": "Trailing page(s) of document", "body": "\r\nThis is just a reminder that a) trailing page(s) would do good b) that then maybe the naming scheme needs to be readjusted .", + "summary": "The issue highlights the need for adding trailing pages to a document and suggests that the naming scheme may require adjustment as a result. It implies that modifications should be made to improve the document's structure and organization.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/4", @@ -20958,6 +21675,7 @@ "node_id": "MDU6SXNzdWU0NjMxMDQ2OTE=", "title": "Addition to the threat mindmap might be needed", "body": "Breaking out of a container might not only be achieved by root processes or (ab)use cases of SETUID/SETGID, but through risky bind mounts of the host file system, too.\r\nUID 0 might help with additional permissions in such a scenario, but i'd argue it would still be considered a separate point.\r\n\r\nThe docker docs even acknowledge it [here](https://docs.docker.com/storage/) (search page for \"security implications\").\r\n\r\nSide note: Most english sources call it container breakout instead of container outbreak.\r\nUsing your favourite search engine with both terms will demonstrate the difference in search result quality.", + "summary": "The issue suggests that the threat mindmap should be updated to include the risk of container breakout through risky bind mounts of the host file system, in addition to root processes and SETUID/SETGID abuses. It points out that while UID 0 can provide additional permissions, this scenario should be considered a separate point. The author also notes a terminology inconsistency, recommending the use of \"container breakout\" over \"container outbreak\" for better search results. \n\nTo address this issue, the mindmap should be revised to incorporate these additional insights on container security.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/9", @@ -20984,6 +21702,7 @@ "node_id": "MDU6SXNzdWU0OTgxNzc0ODA=", "title": "Add year of document release to numbering scheme", "body": "While I found a clash of the numbering scheme of OWASP Top 10 and the API Top 10 (https://github.com/OWASP/API-Security/issues/24) I accidentially realized that the Docker Top 10 lack the year of release in their numbering scheme.\r\n\r\n## Recommendation\r\n\r\nUse `D1:2019` instead of `D1` in the summary table and when referring to individual list entries. 
This would harmonize Top 10 lists of OWASP overall a bit.", + "summary": "The issue highlights a lack of consistency in the numbering scheme of the Docker Top 10 list, specifically the absence of the release year in its entries. It proposes that the format be updated to include the year, such as using `D1:2019` instead of just `D1`. This change aims to create a more uniform presentation across all OWASP Top 10 lists. Implementing this recommendation would involve updating the summary table and individual list entries to reflect the new format.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/13", @@ -21012,6 +21731,7 @@ "node_id": "MDU6SXNzdWU1MDAwNzczNTg=", "title": "Image Scanning in D02", "body": "Hi *,\r\n\r\nI could need some help wrt to image scanning for known vulnerabilities, see D02 --> How can I find out? --> Automatic.\r\n\r\nPreferably short and \"crispy\"\r\n\r\nCheers, Dirk\r\n ", + "summary": "The issue discusses the need for assistance with automatic image scanning for known vulnerabilities in the D02 context. A concise and effective solution or guidance is requested to address this scanning process. It may be beneficial to explore existing tools or methods that facilitate automated vulnerability scanning of images.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/14", @@ -21040,6 +21760,7 @@ "node_id": "MDU6SXNzdWU1MjAwMjI5MjM=", "title": "Rendered PDF seems to have broken letter spacing, makes reading a lot less enjoyable", "body": "Hi! I found the rendered `dist/owasp-docker-security.pdf` to be rather unpleasant on the eyes when it comes to reading the actual text, probably mostly to the unnatural letter spacing, for example:\r\n\r\n![owasp_docker_security_pdf_letter_spacing](https://user-images.githubusercontent.com/1577132/68481007-68534a80-0236-11ea-84b8-9677c92e8288.png)\r\n\r\nSame results with evince and okular. GitHub's rendering at https://github.com/OWASP/Docker-Security/blob/master/dist/owasp-docker-security.pdf seems to have the same issue.", + "summary": "The rendered PDF of the OWASP Docker Security document has issues with letter spacing, making it difficult to read. This problem persists across various PDF viewers. To improve readability, it may be necessary to adjust the letter spacing settings in the document's formatting.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/17", @@ -21068,6 +21789,7 @@ "node_id": "MDU6SXNzdWU1MjAwNDA2OTk=", "title": "[D01] Issues with relying on (or advertising) Docker instruction \"USER [:]\"", "body": "The [current text](https://github.com/OWASP/Docker-Security/blame/master/D01%20-%20Secure%20User%20Mapping.md#L21) mentions:\r\n> Then, before you start the microservice, the `USER ` [3] switches to this user.\r\n\r\nWhile that is true and might be of help while building an image, it is my understanding that it would be a mistake to let the image tell the operator what user to use unless we have full control over the the image we run. E.g. you could drop line `USER node` from [Dockerfile](https://github.com/OWASP/Docker-Security/blob/master/Dockerfile#L11) and when I rebuild or pull the image again next time, I'd start running the image as user `root`. 
(If I had a line in `docker-compose.yml` or my Docker command line running the image, I would be safe against that kind of change.)\r\n\r\nWhat do you think?", + "summary": "The issue raises concerns about the reliance on the Docker instruction \"USER [:]\" for setting the user context within Docker images. It argues that allowing an image to dictate which user to run can be risky unless there is complete control over the image. This could lead to unintended security vulnerabilities, especially if the specified user is changed or removed, potentially reverting to the root user. The discussion suggests that operators should ensure they explicitly define user settings in their configurations, such as `docker-compose.yml` or command line arguments, to maintain security. A review and revision of the documentation to address this concern may be necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/20", @@ -21094,6 +21816,7 @@ "node_id": "MDU6SXNzdWU3MDU0ODIzMjc=", "title": "Other threats (+testing guide)", "body": "Hi\r\n\r\nI have some other threats to add to this (good) list\r\n\r\n- Untrusted base images\r\n- Supply chain poisoning\r\n- This is not related to docker itself, but it might be good to add Kubernetes issues too (maybe a kubernetes top 10 is too much)\r\n\r\nI dont know if those qualify for the top 10, but for sure in a docker security guide. \r\n\r\nWould you be accepting a PR where i add those? I have contributed before to the mobile testing guide and i will be glad to contribute here too :)", + "summary": "A user suggests adding additional threats to an existing list, including untrusted base images and supply chain poisoning, as well as considering Kubernetes-related issues. They express their willingness to submit a pull request to incorporate these threats into a Docker security guide. A response should clarify the acceptance of contributions and the process for submitting the proposed changes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/23", @@ -21120,6 +21843,7 @@ "node_id": "MDU6SXNzdWU3NzU1OTc3NjY=", "title": "Translation to Brazilian Portuguese and Contributions", "body": "Hello there,\r\n\r\nI'm just have started a translation for this awesome project do Brazilian Portuguese. I'm a leader for Belo Horizonte Chapter and we just fork this project to do this translation apart and we send a pull request when apropriate.\r\n\r\nAs WIP I consider a great oportunity to stay close translating it at the same time this projects gets in shape. I'm also have interest to contribute in some way, just let me know. This container rush had a impact on security so I consider this project a must have.\r\n\r\nGreetings from Brazil. \r\nHappy holidays.", + "summary": "A user has initiated a translation of a project into Brazilian Portuguese and is currently working on it as a fork. They express a willingness to contribute further and are open to communication regarding potential contributions. The user emphasizes the importance of the project, particularly in light of recent security concerns related to containerization. 
It may be beneficial to engage with them regarding collaboration and integration of the translation efforts.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/28", @@ -21146,6 +21870,7 @@ "node_id": "MDU6SXNzdWU3NzU5MzYyMjI=", "title": "Fix or replace gitbook (in Dockerfile)", "body": "\r\nJust building a new PDF since a while and I was confronted with this ugly message:\r\n\r\n![image](https://user-images.githubusercontent.com/8036727/103292516-3b3f9980-49ee-11eb-923c-41f260f038a5.png)\r\n\r\n\r\nProbably the threat is not high. But for a secuirty project it's just ugly. Can you @PauloASilva or sombody else give a hand here?\r\nGitbook-cli supported is is no longer under active development. as stated here: https://github.com/GitbookIO/gitbook", + "summary": "The issue highlights a problem with the use of gitbook in the Dockerfile, specifically related to its lack of active development and potential security concerns. The user encountered an unpleasant warning message while building a new PDF. It is suggested that the gitbook-cli should either be fixed or replaced with an alternative. Collaboration from others in the project is requested to address this matter.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/31", @@ -21174,6 +21899,7 @@ "node_id": "MDU6SXNzdWU3NzU5NzgzMDY=", "title": "PDF generation: Replacement for gitbook-cli (and maybe calibre)?", "body": "\r\nBecause of several issues with gitbook-cli, its status, and calibre issues I am seeking for a replacement or help with a solution which works.\r\n\r\nThe goal is:\r\n* to enable almost everybody to build or get a PDF\r\n* should work at least under every Linux distro \r\n* it should be maintainable (no private or deprecated repos)\r\n\r\nSee also #17, #31, #32", + "summary": "The issue discusses the need for a replacement for gitbook-cli and possibly calibre due to various problems encountered with these tools. The goal is to find a solution that allows users to easily create PDFs across all Linux distributions while ensuring maintainability and avoiding reliance on private or deprecated repositories. It may be beneficial to explore alternative tools or methods that meet these criteria.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/33", @@ -21202,6 +21928,7 @@ "node_id": "MDU6SXNzdWU3NzYwNzI5MTU=", "title": "Create copyright and license section", "body": "\r\nexample https://github.com/OWASP/Docker-Security/issues/18#issuecomment-552101557 of Application Top 10 ", + "summary": "The issue requests the addition of a copyright and license section to the project's documentation. It references an example from another repository to guide the implementation. To address this, the relevant copyright information and licensing details should be drafted and included in the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/36", @@ -21228,6 +21955,7 @@ "node_id": "MDU6SXNzdWU5MTE0MTU4MTg=", "title": "D02 - Patch Management Strategy Suggestion", "body": "Suggestion to include guidance on tracking the components in your base image, and your own bundled software, as part of D02.\r\n\r\nThere are tools like Anchore Syft that can generate a software bill of materials for container images. This information can be fed into tools like OWASP Dependency-Track for continuous analysis. 
And identification of vulnerable components.\r\n\r\nIt also helps address OWASP Top 10 A9:2017-Using Components with Known Vulnerabilities, and activities identified in the OWASP SCVS.", + "summary": "The issue suggests enhancing the D02 - Patch Management Strategy by incorporating guidance on tracking components within base images and bundled software. It highlights the use of tools like Anchore Syft to generate software bills of materials for container images, which can then be utilized with OWASP Dependency-Track for ongoing vulnerability analysis. This addition would also aid in addressing OWASP Top 10 A9:2017 regarding known vulnerabilities and align with activities from the OWASP Software Component Verification Standard (SCVS). Implementing these suggestions could improve the overall security posture related to component management.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/46", @@ -21250,10 +21978,11 @@ "pk": 745, "fields": { "nest_created_at": "2024-09-11T21:13:03.123Z", - "nest_updated_at": "2024-09-12T02:41:01.793Z", + "nest_updated_at": "2024-09-13T16:25:29.018Z", "node_id": "I_kwDOCMFoCc5vxONL", "title": "D06,D08,D09,D10 content lost", "body": "D06,D08,D09,D10 content lost? why? someone delete or may I do something to recompile them?", + "summary": "The issue reports that content for D06, D08, D09, and D10 has been lost, raising concerns about whether it was deleted intentionally or if there's a way to recover or recompile the missing content. It suggests investigating the cause of the loss and exploring potential recovery methods.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Docker-Security/issues/50", @@ -21280,6 +22009,7 @@ "node_id": "MDU6SXNzdWUzODE1OTIzMjM=", "title": "Refine future work", "body": "We've just released the initial report, so far the `Future work` section contains two items:\r\n\r\n1. Open call to get more data from organizations and practitioners\r\n2. Development of Damn Vulnerable Serverless Application (DVSA)\r\n\r\nBoth of those could be defined more precisely, split into more fine-grained work items and milestones.\r\n\r\n", + "summary": "The current `Future work` section of the initial report includes an open call for data from organizations and practitioners, as well as the development of a Damn Vulnerable Serverless Application (DVSA). To enhance clarity and direction, these items should be refined by providing more detailed definitions, breaking them down into smaller, actionable work items, and establishing specific milestones.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/6", @@ -21302,10 +22032,11 @@ "pk": 747, "fields": { "nest_created_at": "2024-09-11T21:13:12.343Z", - "nest_updated_at": "2024-09-12T02:31:39.923Z", + "nest_updated_at": "2024-09-13T16:26:58.132Z", "node_id": "MDU6SXNzdWUzODE1OTI5NjU=", "title": "How to collaborate?", "body": "As of today @nu11p0inter is the leader of the project and handles most of the work himself (at least that is my impression). I think the project could benefit from:\r\n\r\n1. Defining roles and responsibilities that we need to complete the project\r\n2. Get volunteers to sign up for ☝️ \r\n3. Define how to collaborate, set up periodic calls for project members and general public to participate and how to track progress", + "summary": "The issue discusses the need for improved collaboration within the project. 
It suggests defining roles and responsibilities to enhance teamwork, encouraging volunteers to get involved, and establishing a clear collaboration framework. Additionally, it proposes organizing periodic calls for project members and the public to facilitate participation and to track progress effectively. Implementing these suggestions could lead to more organized and efficient project development.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/7", @@ -21334,6 +22065,7 @@ "node_id": "MDU6SXNzdWUzODIxNTgzNTA=", "title": "What risk rating scale to use?", "body": "The OWASP Top 10 project uses the [OWASP Risk Rating Methodology](https://www.owasp.org/index.php/OWASP_Risk_Rating_Methodology) to rank risks on the list.\r\n\r\nIt looks like a good idea for the Serverless Top 10 project as well. I feel use of this risk rating scale needs to be explicitly evaluated for fit in the serverless context and clearly stated in the final report.\r\n\r\nIf we reach a consensus that this is a good risk rating scale, we can evaluate OWASP Top 10 risks in the serverless context, replacing the `Serverless Risk Meter` from the original report.", + "summary": "There is a discussion regarding the applicability of the OWASP Risk Rating Methodology for the Serverless Top 10 project. The suggestion is to evaluate its relevance within the serverless context and to explicitly state its use in the final report. If it is deemed suitable, the plan would be to replace the existing `Serverless Risk Meter` with this methodology when assessing OWASP Top 10 risks in that context. It may be beneficial to gather consensus on this approach.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/8", @@ -21362,6 +22094,7 @@ "node_id": "MDU6SXNzdWUzODIxNjYyMTQ=", "title": "How to promote the project?", "body": "Promotion is necessary for the Serverless Top 10 project to have the desired impact. Possible ways to promote the project are:\r\n\r\n1) Blogging and tweeting,\r\n2) Conference talks,\r\n3) Podcasts,\r\n4) Webinars,\r\n5) Promotional videos.\r\n\r\nIt would be good to coordinate promotion efforts to maximize visibility of the project in relevant communities, both serverless and AppSec.\r\n\r\nPromotion is also important for gathering enough vulnerability and threat data in the open call.", + "summary": "The issue discusses the need for promoting the Serverless Top 10 project to enhance its impact. Suggested promotional strategies include blogging, tweeting, giving conference talks, participating in podcasts, hosting webinars, and creating promotional videos. Coordinating these efforts is important to increase visibility within relevant communities, particularly in serverless and application security (AppSec). Additionally, effective promotion is essential for collecting sufficient vulnerability and threat data during the open call. 
To move forward, a coordinated plan for these promotional activities should be developed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/10", @@ -21390,6 +22123,7 @@ "node_id": "MDU6SXNzdWUzODMwNjU0NTA=", "title": "Should we organize a working session during the Open Security Summit?", "body": "The [Open Security Summit](https://open-security-summit.org/) is a face-to-face gathering that allows application security practitioners to collaborate.\r\n\r\nThere are several tracks and I think two of them could be good to host a session focused or related to the Serverless Top 10 project.\r\n\r\nI don't see any downsides in trying to organize such a session but I'd love to get some feedback.", + "summary": "The discussion revolves around the potential organization of a working session during the Open Security Summit, specifically targeting the Serverless Top 10 project. The author believes that this could be beneficial and is seeking feedback on the idea. It would be helpful to gather opinions on the feasibility and interest in hosting such a session.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/11", @@ -21418,6 +22152,7 @@ "node_id": "MDU6SXNzdWUzODQzNDM2MzI=", "title": "Should we add broken multi-tenancy and side-channel vulnerabilites to Other Risks?", "body": "Serverless environments are inherently multi-tenant environments. If the multi-tenant isolation mechanisms were broken, this would be a very serious attack vector on serverless applications.\r\n\r\nSimilarly, side-channel attacks (such as Meltdown) pose a serious risk to secrets processed by serverless applications and the integrity of the processing logic itself.", + "summary": "The discussion revolves around whether to include broken multi-tenancy and side-channel vulnerabilities in the \"Other Risks\" section. It highlights that serverless environments are multi-tenant, and any failure in isolation mechanisms could lead to significant security risks. Additionally, side-channel attacks like Meltdown threaten both the confidentiality of secrets and the integrity of processing in serverless applications. \n\nTo address this, it may be necessary to evaluate and potentially expand the risk documentation to incorporate these vulnerabilities.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/12", @@ -21446,6 +22181,7 @@ "node_id": "MDU6SXNzdWUzOTExMjU0ODM=", "title": "Discourse as the discussion forum for the project", "body": "The OWASP Foundation is currently testing Discourse as a replacement for mailing lists. Since the mailing list for the OWASP Serverless Top 10 has never took off, this is good opportunity to start a discussion forum.\r\n\r\nThings to be done:\r\n- [ ] Provide a description of the category\r\n- [ ] Add link to the project wiki page\r\n- [ ] Announce on Slack", + "summary": "The OWASP Foundation is exploring the use of Discourse as a replacement for mailing lists, particularly for the OWASP Serverless Top 10, which has struggled to gain traction. 
To move forward, the following tasks need to be completed: provide a description for the discussion category, add a link to the project wiki page, and make an announcement on Slack.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/13", @@ -21474,6 +22210,7 @@ "node_id": "MDU6SXNzdWU4MTQ0MTYyOTI=", "title": "Incorrect variable name in sample code for injection", "body": "\r\n[The image](https://github.com/OWASP/Serverless-Top-10-Project/blob/master/2018/en/images/0x01-injection-2.png) showing an example code for Injection attack II contains an errornous variable name.\r\n\r\nIn the first call to `subprocess.call()` the variable `file_path` is used. But this variable is never defined, in all other lines `fpath` is used instead.\r\n\r\nTo conclude, the single occurence of `file_path` should be replaced by `fpath`.\r\n\r\n", + "summary": "The issue reports an incorrect variable name in a sample code related to injection attacks. The code uses `file_path` in one instance, while `fpath` is consistently used elsewhere. The suggested fix is to change the occurrence of `file_path` to `fpath` to ensure consistency and correctness in the example.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/19", @@ -21500,6 +22237,7 @@ "node_id": "I_kwDOCLuy5M5aZny2", "title": "A new version", "body": "Hey @MarcinHoppe and team,\r\n\r\nI'm Cesar, nice to meet you! I really like the Serverless Top 10 Project. I've been thinking about creating a PR with a new version corresponding to the OWASP 2021, but I'd like to know what would be your reaction beforehand.\r\n\r\nFollowing OWASP Top 10 changes from 2017 to 2021, most changes would be related with reorganizing the documentation and a few parts would be created from scratch.\r\n\r\nAdditionally, I'd like to get your opinion on how relevant is this project nowadays in terms of market and for you. I wouldn't like to work onto something that is not needed anymore.\r\n\r\nBest,\r\nCesar", + "summary": "A proposal has been made to create a pull request for a new version of the Serverless Top 10 Project that aligns with the OWASP 2021 updates. The suggested changes primarily involve reorganizing existing documentation, with some new sections to be developed. The author is seeking feedback on the relevance of the project in the current market and confirmation on whether to proceed with the PR. 
It would be beneficial to provide insights on the project's current importance and any specific areas that may need attention or enhancement.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Serverless-Top-10-Project/issues/21", @@ -21522,10 +22260,11 @@ "pk": 755, "fields": { "nest_created_at": "2024-09-11T21:13:29.879Z", - "nest_updated_at": "2024-09-11T21:13:29.879Z", + "nest_updated_at": "2024-09-13T16:12:15.677Z", "node_id": "MDU6SXNzdWU0Mjc3MTY0MDk=", "title": "Can this be removed?", "body": "Just asking :)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-safe-xml/issues/1", @@ -21552,6 +22291,7 @@ "node_id": "I_kwDOCE4w7c486de7", "title": "verbose alert in CLI", "body": "* need to add multi verbose alerts in our cli", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Python-Honeypot/issues/338", @@ -21580,6 +22320,7 @@ "node_id": "I_kwDOCE4w7c5HM7AA", "title": "Cannot connect to elasticsearch", "body": "Hi,\r\nI have followed the below steps.\r\n\r\nDownloaded code from https://github.com/OWASP/Python-Honeypot.git\r\ncd Python-Honeypot\r\ndocker-compose -f docker-compose-host.yml up\r\nand\r\ndocker-compose \r\n\r\nBut throws this error. Thanks\r\n\r\nroot@Python-Honeypot-master# docker-compose up\r\nStarting elasticsearch ... done\r\nStarting python-honeypot-master_ohp_1 ... done\r\nStarting grafana ... done\r\nAttaching to elasticsearch, python-honeypot-master_ohp_1, grafana\r\ngrafana | GF_PATHS_DATA='/var/lib/grafana' is not writable.\r\ngrafana | You may have issues with file permissions, more information here: http://docs.grafana.org/installation/docker/#migrate-to-v51-or-later\r\ngrafana | mkdir: cannot create directory '/var/lib/grafana/plugins': Permission denied\r\ngrafana exited with code 1\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:46:56,153Z\", \"level\": \"INFO\", \"component\": \"o.e.n.Node\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"version[7.13.3], pid[7], build[default/docker/5d21bea28db1e89ecc1f66311ebdec9dc3aa7d64/2021-07-02T12:06:10.804015202Z], OS[Linux/5.4.0-105-generic/amd64], JVM[AdoptOpenJDK/OpenJDK 64-Bit Server VM/16/16+36]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:46:56,157Z\", \"level\": \"INFO\", \"component\": \"o.e.n.Node\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"JVM home [/usr/share/elasticsearch/jdk], using bundled JDK [true]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:46:56,157Z\", \"level\": \"INFO\", \"component\": \"o.e.n.Node\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"JVM arguments [-Xshare:auto, -Des.networkaddress.cache.ttl=60, -Des.networkaddress.cache.negative.ttl=10, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -XX:+ShowCodeDetailsInExceptionMessages, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dio.netty.allocator.numDirectArenas=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.locale.providers=SPI,COMPAT, --add-opens=java.base/java.io=ALL-UNNAMED, -XX:+UseG1GC, -Djava.io.tmpdir=/tmp/elasticsearch-4171500607543486750, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, 
-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Des.cgroups.hierarchy.override=/, -Xms4096m, -Xmx4096m, -XX:MaxDirectMemorySize=2147483648, -XX:G1HeapRegionSize=4m, -XX:InitiatingHeapOccupancyPercent=30, -XX:G1ReservePercent=15, -Des.path.home=/usr/share/elasticsearch, -Des.path.conf=/usr/share/elasticsearch/config, -Des.distribution.flavor=default, -Des.distribution.type=docker, -Des.bundled_jdk=true]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,096Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [aggs-matrix-stats]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,096Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [analysis-common]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,096Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [constant-keyword]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,097Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [frozen-indices]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,098Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [ingest-common]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,098Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [ingest-geoip]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,100Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [ingest-user-agent]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,103Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [kibana]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,110Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [lang-expression]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,111Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [lang-mustache]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,112Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [lang-painless]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,112Z\", \"level\": \"INFO\", \"component\": 
\"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [mapper-extras]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,114Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [mapper-version]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,115Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [parent-join]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,115Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [percolator]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,116Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [rank-eval]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,118Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [reindex]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,119Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [repositories-metering-api]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,120Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [repository-encrypted]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,120Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [repository-url]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,123Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [runtime-fields-common]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,124Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [search-business-rules]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,124Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [searchable-snapshots]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,126Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [snapshot-repo-test-kit]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,127Z\", \"level\": \"INFO\", \"component\": 
\"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [spatial]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,128Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [transform]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,129Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [transport-netty4]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,135Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [unsigned-long]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,135Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [vectors]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,136Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [wildcard]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,137Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-aggregate-metric]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,138Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-analytics]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,139Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-async]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,139Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-async-search]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,140Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-autoscaling]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,142Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-ccr]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,142Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-core]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,146Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": 
\"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-data-streams]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,147Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-deprecation]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,148Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-enrich]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,149Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-eql]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,150Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-fleet]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,151Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-graph]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,152Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-identity-provider]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,153Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-ilm]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,154Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-logstash]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,155Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-ml]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,158Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-monitoring]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,159Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-ql]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,160Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-rollup]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,160Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": 
\"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-security]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,161Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-shutdown]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,162Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-sql]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,162Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-stack]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,166Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-text-structure]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,170Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-voting-only-node]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,172Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"loaded module [x-pack-watcher]\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,174Z\", \"level\": \"INFO\", \"component\": \"o.e.p.PluginsService\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"no plugins loaded\" }\r\nelasticsearch | {\"type\": \"server\", \"timestamp\": \"2022-04-06T11:47:02,376Z\", \"level\": \"ERROR\", \"component\": \"o.e.b.ElasticsearchUncaughtExceptionHandler\", \"cluster.name\": \"docker-cluster\", \"node.name\": \"6f1df84c0b6e\", \"message\": \"uncaught exception in thread [main]\",\r\nelasticsearch | \"stacktrace\": [\"org.elasticsearch.bootstrap.StartupException: ElasticsearchException[failed to bind service]; nested: AccessDeniedException[/usr/share/elasticsearch/data/nodes];\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:163) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:150) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:75) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:116) ~[elasticsearch-cli-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.cli.Command.main(Command.java:79) ~[elasticsearch-cli-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:115) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:81) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"Caused by: org.elasticsearch.ElasticsearchException: failed to bind service\",\r\nelasticsearch | \"at 
org.elasticsearch.node.Node.(Node.java:782) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.node.Node.(Node.java:278) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Bootstrap$5.(Bootstrap.java:217) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:217) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:397) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:159) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"... 6 more\",\r\nelasticsearch | \"Caused by: java.nio.file.AccessDeniedException: /usr/share/elasticsearch/data/nodes\",\r\nelasticsearch | \"at sun.nio.fs.UnixException.translateToIOException(UnixException.java:90) ~[?:?]\",\r\nelasticsearch | \"at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:106) ~[?:?]\",\r\nelasticsearch | \"at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:111) ~[?:?]\",\r\nelasticsearch | \"at sun.nio.fs.UnixFileSystemProvider.createDirectory(UnixFileSystemProvider.java:396) ~[?:?]\",\r\nelasticsearch | \"at java.nio.file.Files.createDirectory(Files.java:694) ~[?:?]\",\r\nelasticsearch | \"at java.nio.file.Files.createAndCheckIsDirectory(Files.java:801) ~[?:?]\",\r\nelasticsearch | \"at java.nio.file.Files.createDirectories(Files.java:787) ~[?:?]\",\r\nelasticsearch | \"at org.elasticsearch.env.NodeEnvironment.lambda$new$0(NodeEnvironment.java:265) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.env.NodeEnvironment$NodeLock.(NodeEnvironment.java:202) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.env.NodeEnvironment.(NodeEnvironment.java:262) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.node.Node.(Node.java:368) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.node.Node.(Node.java:278) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Bootstrap$5.(Bootstrap.java:217) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:217) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:397) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:159) ~[elasticsearch-7.13.3.jar:7.13.3]\",\r\nelasticsearch | \"... 
6 more\"] }\r\nelasticsearch | uncaught exception in thread [main]\r\nelasticsearch | ElasticsearchException[failed to bind service]; nested: AccessDeniedException[/usr/share/elasticsearch/data/nodes];\r\nelasticsearch | Likely root cause: java.nio.file.AccessDeniedException: /usr/share/elasticsearch/data/nodes\r\nelasticsearch | at java.base/sun.nio.fs.UnixException.translateToIOException(UnixException.java:90)\r\nelasticsearch | at java.base/sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:106)\r\nelasticsearch | at java.base/sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:111)\r\nelasticsearch | at java.base/sun.nio.fs.UnixFileSystemProvider.createDirectory(UnixFileSystemProvider.java:396)\r\nelasticsearch | at java.base/java.nio.file.Files.createDirectory(Files.java:694)\r\nelasticsearch | at java.base/java.nio.file.Files.createAndCheckIsDirectory(Files.java:801)\r\nelasticsearch | at java.base/java.nio.file.Files.createDirectories(Files.java:787)\r\nelasticsearch | at org.elasticsearch.env.NodeEnvironment.lambda$new$0(NodeEnvironment.java:265)\r\nelasticsearch | at org.elasticsearch.env.NodeEnvironment$NodeLock.(NodeEnvironment.java:202)\r\nelasticsearch | at org.elasticsearch.env.NodeEnvironment.(NodeEnvironment.java:262)\r\nelasticsearch | at org.elasticsearch.node.Node.(Node.java:368)\r\nelasticsearch | at org.elasticsearch.node.Node.(Node.java:278)\r\nelasticsearch | at org.elasticsearch.bootstrap.Bootstrap$5.(Bootstrap.java:217)\r\nelasticsearch | at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:217)\r\nelasticsearch | at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:397)\r\nelasticsearch | at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:159)\r\nelasticsearch | at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:150)\r\nelasticsearch | at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:75)\r\nelasticsearch | at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:116)\r\nelasticsearch | at org.elasticsearch.cli.Command.main(Command.java:79)\r\nelasticsearch | at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:115)\r\nelasticsearch | at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:81)\r\nelasticsearch | For complete error details, refer to the log at /usr/share/elasticsearch/logs/docker-cluster.log\r\nelasticsearch exited with code 1\r\nohp_1 | ______ __ _____ _____\r\nohp_1 | / __ \\ \\ / /\\ / ____| __ \\\r\nohp_1 | | | | \\ \\ /\\ / / \\ | (___ | |__) |\r\nohp_1 | | | | |\\ \\/ \\/ / /\\ \\ \\___ \\| ___/\r\nohp_1 | | |__| | \\ /\\ / ____ \\ ____) | |\r\nohp_1 | \\____/ \\/ \\/_/ \\_\\_____/|_|\r\nohp_1 | _ _ _____ _\r\nohp_1 | | | | | | __ \\ | |\r\nohp_1 | | |__| | ___ _ __ ___ _ _| |__) |__ | |_\r\nohp_1 | | __ |/ _ \\| \"_ \\ / _ \\ | | | ___/ _ \\| __|\r\nohp_1 | | | | | (_) | | | | __/ |_| | | | (_) | |_\r\nohp_1 | |_| |_|\\___/|_| |_|\\___|\\__, |_| \\___/ \\__|\r\nohp_1 | __/ |\r\nohp_1 | |___/\r\nohp_1 |\r\nohp_1 | [X] [2022-04-06 11:47:04] Cannot connect to elasticsearch\r\nohp_1 | python-honeypot-master_ohp_1 exited with code 1\r\nroot@ub-c2:/home/sk/Python-Honeypot-master# cat /usr/share/elasticsearch/logs/docker-cluster.log\r\ncat: /usr/share/elasticsearch/logs/docker-cluster.log: No such file or directory\r\n\r\n_________________\r\n**OS**: `Ubuntu`\r\n\r\n**OS Version**: `20 LTS`\r\n\r\n**Python Version**: `Python 3.8.10`\r\n\r\n\r\n\r\n\r\n", + "summary": "", "state": "open", 
"state_reason": "", "url": "https://github.com/OWASP/Python-Honeypot/issues/349", @@ -21606,6 +22347,7 @@ "node_id": "I_kwDOCE4w7c5OXt13", "title": "network thread issue", "body": "two issues\r\n\r\n1. network thread is not terminating after an error\r\nto reproduce the issue, run a module, and then kill the container with `docker kill container_name`. OHP is expected to exit from working and throw an error, but the network thread stays open and it's not terminating.\r\n\r\n2. when I press ctrl+c to terminate the module, pcap file is not submitted to the database.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Python-Honeypot/issues/359", @@ -21636,6 +22378,7 @@ "node_id": "I_kwDOCE4w7c5PubiL", "title": "Error in ssh/strong_password module", "body": "# There is a Hash Mismatch error while creating the ssh/strong_password module\r\n```sh\r\n#10 123.8 E: Failed to fetch http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl-dev_3.0.2-0ubuntu1.6_amd64.deb Hash Sum mismatch\r\n#10 123.8 Hashes of expected file:\r\n#10 123.8 - SHA512:813612d32c00b73881991d92a2098b2c098ee7e4ad45d1bc9a6228bc5bc627e66014444dccdefcda166f5ffa01ff4806fdc57214ffe052e84f021726f104d39c\r\n#10 123.8 - SHA256:62f764e8034df6bc0ae240ee5fe2db7fc0a12cb0d7d3e2aba1cf6da642a6fb0d\r\n#10 123.8 - SHA1:3054526b55411ceaf7f0c9eada2598de6f4bc575 [weak]\r\n#10 123.8 - MD5Sum:a8a4aba4b8873398e243dd327080d48a [weak]\r\n#10 123.8 - Filesize:2370414 [weak]\r\n#10 123.8 Hashes of received file:\r\n#10 123.8 - SHA512:82805e89206bfb79b4d1dc52d06c1aeb756e57be5603cd434ab818c4b4708cd01f4143c88eb8a7b7f90e701a6bed720da9d1586ae34def2adff16f2f3c843f5a\r\n#10 123.8 - SHA256:3aabc4bae4d39e45b5c0850699a7548f7c80f4a5f2e17de55974b277b0cd6e8a\r\n#10 123.8 - SHA1:9ab406826368f95ed34543d90cf23ec92584718e [weak]\r\n#10 123.8 - MD5Sum:048cef564f311227505bf2225054f155 [weak]\r\n#10 123.8 - Filesize:2370414 [weak]\r\n#10 123.8 Last modification reported: Tue, 05 Jul 2022 13:08:45 +0000\r\n#10 123.8 E: Unable to fetch some archives, maybe run apt-get update or try with --fix-missing?\r\n```", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Python-Honeypot/issues/362", @@ -21662,6 +22405,7 @@ "node_id": "I_kwDOCE4w7c5baskC", "title": "unable to connect to elasticsearch", "body": "Hey,\r\nI've followed the installation steps and guide given in the wiki page but when ever I try to run the ohp.py I'm getting the \"unable to connect to elasticsearch\" error elastic search is working on my local machine\r\n\r\n![image](https://user-images.githubusercontent.com/72266248/212532008-acc5990a-92c7-40b5-aef8-36efe4112e7a.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Python-Honeypot/issues/367", @@ -21684,10 +22428,11 @@ "pk": 761, "fields": { "nest_created_at": "2024-09-11T21:13:43.602Z", - "nest_updated_at": "2024-09-11T21:13:43.602Z", + "nest_updated_at": "2024-09-13T16:12:22.172Z", "node_id": "I_kwDOCE4w7c6EoYl3", "title": "Problem with requirements", "body": "Hi,\r\nI've followed the installation steps and guide given in the wiki page, but I\\`ve got this error while starting via `python ohp.py --start-api-server` and the same error in docker compose: `ImportError: cannot import name 'url_quote' from 'werkzeug.urls'`\r\nI\\`ve googled it and I understood that version of werkzeug must be the same as flask version, so I\\`ve updated `requrements.txt`\r\nThan I\\`ve got this 
error:\r\n![изображение](https://github.com/OWASP/Python-Honeypot/assets/166014299/3ce0b04b-f959-469a-bfdc-ad1a18788b73)\r\n\r\n\r\n\r\n_________________\r\n**OS**: `Ubuntu`\r\n\r\n**OS Version**: `22.04`\r\n\r\n**Python Version**: `3.10.12`", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Python-Honeypot/issues/376", @@ -21710,10 +22455,11 @@ "pk": 762, "fields": { "nest_created_at": "2024-09-11T21:13:57.204Z", - "nest_updated_at": "2024-09-11T21:13:57.204Z", + "nest_updated_at": "2024-09-13T16:12:32.008Z", "node_id": "MDU6SXNzdWU4MjI1MTc3NjE=", "title": "Couldn't find minutes of the meet conducted @26th Feb 2021", "body": "This [link](https://www.owasp.org/index.php/Women_In_AppSec) appears broken, has it migrated to somewhere else?\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/WIA/issues/1", @@ -21740,6 +22486,7 @@ "node_id": "MDU6SXNzdWUzNTU5NjY2ODA=", "title": "Develop a new VM based honeypot/probe based on CRS v3.2.", "body": "We need to create a new deployment using CRS v3.0-v3.1\r\nIdeally the setup using [packer](https://www.packer.io) will provide us a simple and repetitive way of generating for many different platforms.", + "summary": "The issue discusses the development of a new VM-based honeypot/probe using CRS v3.2, while also requiring the creation of a deployment with CRS v3.0-v3.1. The goal is to utilize Packer to ensure a streamlined and repeatable process for generating deployments across various platforms. It is suggested that the first step involves setting up the necessary configurations and scripts for Packer to facilitate this deployment process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/3", @@ -21769,6 +22516,7 @@ "node_id": "MDU6SXNzdWUzNTU5NzE2MzU=", "title": "Develop new alternative small footprint honeypots", "body": "Small footprint honeypot/probe formats utilising Docker & Raspberry Pi", + "summary": "The issue discusses the need to develop new small footprint honeypots or probes that can be deployed using Docker and Raspberry Pi. The focus is on creating lightweight solutions that can effectively gather data without requiring extensive resources. To address this, it may be beneficial to explore existing honeypot frameworks and adapt them for Docker and Raspberry Pi environments, ensuring they remain efficient and effective in capturing malicious activity.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/6", @@ -21798,6 +22546,7 @@ "node_id": "MDU6SXNzdWUzNTU5NzQwNDQ=", "title": "Develop machine learning approach to automatically be able to update the rule set being used by the probe based on cyber threat intelligence received", "body": "", + "summary": "The issue discusses the need to create a machine learning solution that can automatically update the rule set utilized by a probe in response to incoming cyber threat intelligence. This involves designing a system that can intelligently adapt the rules based on new threats, enhancing the probe's effectiveness. 
The next steps would likely include outlining the specific requirements for the machine learning model and determining how to integrate it with the existing rule set management.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/7", @@ -21826,6 +22575,7 @@ "node_id": "MDU6SXNzdWUzNTU5NzU4NTA=", "title": "Create module for CMSs", "body": "One of the problems for adoption is the lack of simple inclusion into well-known CMSs.\r\nThe idea is to provide a simple add-on/plugin/modules to include the functionality of semi-automatic configuration for the major CMSs: Wordpress, Joomla, Drupal.\r\n\r\nThe modules will:\r\n- provide a simple way for configuring the Honeypot data, e.g. Server, redirection, possible variables that will act as the honeypot\r\n- a way of inserting that variables at randon on the web interface\r\n- redirection to the Honeypot when this variable is modified", + "summary": "The issue discusses the need for creating modules that facilitate the integration of a specific functionality into popular content management systems (CMSs) like WordPress, Joomla, and Drupal. The proposed modules should allow users to easily configure Honeypot data, including server settings and redirection options. Additionally, they should enable the insertion of variables at random in the web interface and handle redirection to the Honeypot when these variables are altered. To move forward, the development of these modules is necessary to enhance user adoption.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/9", @@ -21854,6 +22604,7 @@ "node_id": "MDU6SXNzdWU3ODAyMzkzODE=", "title": "Logstash Cant communication on 5044: Connection refuse", "body": "Hello,\r\n\r\nI've Implemented honeypot project locally .\r\nAs soon as I trigger mod-sec rule. (by hitting curl request)\r\nPython file start processing those rule but,\r\ngot this error.\r\n\r\n```\r\nERROR\tpipeline/output.go:100\tFailed to connect to backoff(async(tcp://127.0.0.1:5044)): dial tcp 127.0.0.1:5044: connect: connection refused\r\nmodsec_app | 2021-01-06T07:49:46.218Z\tINFO\tpipeline/output.go:93\tAttempting to reconnect to backoff(async(tcp://127.0.0.1:5044)) with 3 reconnect attempt(s)\r\n```\r\n\r\nAny thoughts on this ?\r\n\r\nmy **env** file\r\n\r\n`LOGSTASH_HOST=127.0.0.1:5044\r\n`\r\n\r\nmy **filebeat.yml**\r\n\r\n```\r\noutput:\r\n logstash:\r\n enabled: true\r\n hosts: '${LOGSTASH_HOST:?must set LOGSTASH_HOST env variable}'\r\n timeout: 15\r\n \r\n\r\nfilebeat:\r\n inputs:\r\n -\r\n paths:\r\n - /var/log/modsec_audit_processed.log\r\n type: log\r\n json.keys_under_root: true\r\n json.add_error_key: true\r\n\r\n```\r\n\r\nmy **logstash.conf** file\r\n\r\n```\r\ninput {\r\n beats {\r\n port => 5044\r\n type => \"mod_security\"\r\n codec => json\r\n ssl => false\r\n }\r\n}\r\n\r\n\r\noutput {\r\n elasticsearch {\r\n hosts => \"elasticsearch:9200\"\r\n manage_template => false\r\n index => \"%{[@metadata][beat]}-%{+YYYY.MM.dd}\"\r\n document_type => \"%{[@metadata][type]}\"\r\n }\r\n stdout { codec => rubydebug }\r\n}\r\n```", + "summary": "The issue involves a failure to connect to Logstash on port 5044, resulting in a \"connection refused\" error. The user has set up a honeypot project and is attempting to send data from Filebeat to Logstash but encounters this error when triggering a mod-security rule.\n\nThe relevant configurations include the environment variable for Logstash, the Filebeat configuration, and the Logstash input setup. 
The user is advised to check if Logstash is running and listening on the specified port (5044) to resolve the connection issue. Additionally, verifying network settings and ensuring that the correct services are up and running may be necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/16", @@ -21880,6 +22631,7 @@ "node_id": "MDU6SXNzdWU4NDE1ODg2NzY=", "title": "How information is shared from elasticsearch to MISP", "body": "I need code location for sharing information from elasticsearch to MISP.", + "summary": "The issue requests information about the code responsible for sharing data from Elasticsearch to MISP. It seeks to identify the specific code location that handles this functionality. To address this, the relevant code should be located and shared with the requester.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/17", @@ -21906,6 +22658,7 @@ "node_id": "MDU6SXNzdWU5MzY0ODE1OTQ=", "title": "No Docker-compose.yml file found", "body": "there is no \"docker-compose.yml\" file for the honeytrap folder. ", + "summary": "The issue indicates that a \"docker-compose.yml\" file is missing from the honeytrap folder. To resolve this, a suitable \"docker-compose.yml\" file needs to be created and added to the specified directory.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/18", @@ -21928,10 +22681,11 @@ "pk": 770, "fields": { "nest_created_at": "2024-09-11T21:14:13.860Z", - "nest_updated_at": "2024-09-12T00:16:13.369Z", + "nest_updated_at": "2024-09-13T17:00:15.016Z", "node_id": "I_kwDOB9f3oc5gsWed", "title": "Lists of feature and optimisation requirements", "body": "### initiatives\r\n\r\nThe project has been idle for a while. The issue will be created as a root ticket to gather new items (including some of the old tickets) which are going to be implemented.\r\n\r\n### Items\r\n\r\n- [x] [FEAT] Develop alternative small-footprint honeypot/probe formats.\r\n- [ ] [FEAT] pluggable modules #9 \r\n- [ ] [OPT] Consider new alternatives for log transfer approaches. #6\r\n- [ ] [OPT] The project structure may be a little confusing for individuals to run it. Consider refactoring the structure of the docker-compose file, the folder structure, and the README file.\r\n- [ ] [FIX] images cannot work correctly in some OS (e.g., MacOS) because it does not specify the version.\r\n", + "summary": "The project has been inactive for some time, and this issue serves as a central ticket to compile new and existing feature and optimization requests for future implementation. \n\nA few items have been identified, including the development of alternative honeypot formats and the need for pluggable modules. Additionally, there are suggestions to explore new log transfer methods, refactor the project's structure for clarity, and address compatibility issues with images on certain operating systems like MacOS. \n\nNext steps include reviewing and prioritizing these items for implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/20", @@ -21963,6 +22717,7 @@ "node_id": "I_kwDOB9f3oc5rWcYs", "title": "any instructions and guidelines for install ?", "body": "", + "summary": "The issue is requesting instructions and guidelines for installation. 
To address this, it would be helpful to provide a detailed installation guide, including prerequisites, step-by-step instructions, and any troubleshooting tips.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Honeypot-Project/issues/22", @@ -21989,6 +22744,7 @@ "node_id": "MDU6SXNzdWUzNDUyMTk4NDc=", "title": "Application Crashes on Webkit Cache Challenge", "body": "After starting exercise Data Protection (Rest) -> Webkit Cache, application crashes and user gets out of the app. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/7", @@ -22017,6 +22773,7 @@ "node_id": "MDU6SXNzdWUzNDUyMjIxMjk=", "title": "Donate button on Github repo is not working", "body": "Donate button provided on Github repo is not working. Its redirecting to non-existing Github page. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/8", @@ -22045,6 +22802,7 @@ "node_id": "MDU6SXNzdWUzNDUyMzMxMDE=", "title": "Cydia repo missing iGoat binary", "body": "iGoat cydia repo seems empty. Fix it at https://swiftigoat.yourepo.com/ ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/10", @@ -22073,6 +22831,7 @@ "node_id": "MDU6SXNzdWU0MDg1MDgwNzU=", "title": "How do i run iGoat ?", "body": "I just installed iGoat from repo, but the app doesn't appear on my phone\r\nhow do i run it ?\r\n\r\n![img_0069](https://user-images.githubusercontent.com/30687786/52530902-b555cf00-2d3f-11e9-85df-1015d3f3fd6e.PNG)\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/16", @@ -22099,6 +22858,7 @@ "node_id": "MDU6SXNzdWU0MjczNDkwODQ=", "title": "SQL Injection: Error opening articles database", "body": "I believe the articles.sqlite file is missing. Could you provide the table definition and I can create my own articles.sqlite file? ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/17", @@ -22125,6 +22885,7 @@ "node_id": "MDU6SXNzdWU0NzQzNTA0NzM=", "title": "Cannot find 8082 service in Crypto Challenge Exercise", "body": "Crypto Challenge Exercise need call the url http://localhost:8082/checkout/.\r\n\r\nHowever, I cannot find the 8082 service is up.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/18", @@ -22151,6 +22912,7 @@ "node_id": "MDU6SXNzdWU2MDMwNzgyOTg=", "title": "How to defence from the URLSchemeAttack? ", "body": "I got an idea about How URLSchemeAttack happened? Now I'm looking for the solution to avoide it.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/23", @@ -22177,6 +22939,7 @@ "node_id": "MDU6SXNzdWU2ODk4MjQzNzE=", "title": "iGoat-Swift igoat_server.rb not working.", "body": "Error: \r\n\r\n$ ./igoat_server.rb\r\n/System/Library/Frameworks/Ruby.framework/Versions/2.6/usr/lib/ruby/2.6.0/universal-darwin19/rbconfig.rb:229: warning: Insecure world writable dir /usr/local/opt in PATH, mode 04077\r\nTraceback (most recent call last):\r\n 2: from ./igoat_server.rb:57:in `
'\r\n 1: from /Library/Ruby/Site/2.6.0/rubygems/core_ext/kernel_require.rb:92:in `require'\r\n/Library/Ruby/Site/2.6.0/rubygems/core_ext/kernel_require.rb:92:in `require': cannot load such file -- sinatra/base (LoadError)\r\n\r\nSystem Config:\r\nMac os: Cataline 10.15.6\r\nRuby Version: ruby 2.6.3p62 (2019-04-16 revision 67580) [universal.x86_64-darwin19]\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/25", @@ -22203,6 +22966,7 @@ "node_id": "MDU6SXNzdWU4NzI2MDE0NzE=", "title": "App Crashes when clicking the submit button for the server communication section", "body": "When clicking the `submit` button on the `server communication` section of the `data protection (transit)` menu the app just crashes out \r\n\r\n[coretrace (1).log](https://github.com/OWASP/iGoat-Swift/files/6405572/coretrace.1.log)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/27", @@ -22229,6 +22993,7 @@ "node_id": "I_kwDOB5_xwc5hsWpF", "title": "where are the solutions?", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/35", @@ -22255,6 +23020,7 @@ "node_id": "I_kwDOB5_xwc5hwcoC", "title": "provide updated ipa file", "body": "Are there any plans to include an updated IPA file to replace the outdated one?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/36", @@ -22277,10 +23043,11 @@ "pk": 783, "fields": { "nest_created_at": "2024-09-11T21:14:31.581Z", - "nest_updated_at": "2024-09-11T21:14:31.581Z", + "nest_updated_at": "2024-09-13T16:12:43.124Z", "node_id": "I_kwDOB5_xwc5iHEeY", "title": "no such module 'Realm'", "body": "ive opened the xcode project and attempt to build but in the insecure data storage -- Realm -- RealmExerciseVC, i get 'no such module 'realm' . ive run pod install and made sure cocoapods is up to date, deleted the derived data directory and cleaned the build. still getting the error. not sure what else to do. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/iGoat-Swift/issues/37", @@ -22307,6 +23074,7 @@ "node_id": "MDU6SXNzdWUzOTM1OTQ4MTA=", "title": "Skill level incorrectly affects likelihood", "body": "Hi.\r\n\r\nHigher technical skill level required from a threat actor should lower likelihood, not the other way around (as it is now).\r\n\r\nSee https://obet.us/owasprr.html#1,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,50 vs https://obet.us/owasprr.html#9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,50\r\n\r\nThanks", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/1", @@ -22335,6 +23103,7 @@ "node_id": "MDU6SXNzdWU0MjE5ODI1NjM=", "title": "Want to work on this project", "body": "I want to work on this project . 
Can I work on this project", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/2", @@ -22363,6 +23132,7 @@ "node_id": "MDU6SXNzdWU0Mzk0Njg1Nzk=", "title": "reference for sast tool", "body": "https://docs.gitlab.com/ee/user/application_security/sast/", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/20", @@ -22391,6 +23161,7 @@ "node_id": "MDU6SXNzdWU1NTM5NjMzOTg=", "title": "Not working as should", "body": "Good day man.\r\nthanks for project.\r\nThe problem on macos.\r\nThe thing is:\r\nI did everything from the guide but still the project will only send by api project creation string but will not send any file for analysis as long as buttons do not work.\r\n\r\n\"изображение\"\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/56", @@ -22419,6 +23190,7 @@ "node_id": "MDU6SXNzdWU1Nzg1MjkxODk=", "title": "Bcrypt version no longer listed unable to run NPM install under /API/", "body": "Hi, the version of bcrypt does not look like it is listed anymore, the script hits a (404) i checked the URL destination myself and got a 404, the github user under /releases/ no longer shows v72-linux. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/57", @@ -22445,6 +23217,7 @@ "node_id": "MDU6SXNzdWU1Nzg1NjIxMDY=", "title": "Running 'npm start' under /api/ throws properties error server of null", "body": "When running npm start, output comes back with property of null under server ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/58", @@ -22471,6 +23244,7 @@ "node_id": "MDU6SXNzdWU1ODU4MDM3ODA=", "title": "Not analyzing", "body": "Hello\r\nI try to scan an existing project but it can not analyze.\r\n\r\nINFO: Load project settings for component key: '5cc5a81df8f52d06ad7a5298aeb5ea2b'\r\nINFO: Load project settings for component key: '5cc5a81df8f52d06ad7a5298aeb5ea2b' (done) | time=82ms\r\nINFO: Load quality profiles\r\nINFO: Load quality profiles (done) | time=129ms\r\nINFO: Load active rules\r\nINFO: Load active rules (done) | time=2490ms\r\nINFO: Indexing files...\r\nINFO: Project configuration:\r\nINFO: 1 file indexed\r\nINFO: 0 files ignored because of scm ignore settings\r\nINFO: ------------- Run sensors on module 5cc5a81df8f52d06ad7a5298aeb5ea2b\r\nINFO: Load metrics repository\r\nINFO: Load metrics repository (done) | time=49ms\r\nINFO: Sensor JaCoCo XML Report Importer [jacoco]\r\nINFO: Sensor JaCoCo XML Report Importer [jacoco] (done) | time=3ms\r\nINFO: Sensor JavaXmlSensor [java]\r\nINFO: Sensor JavaXmlSensor [java] (done) | time=1ms\r\nINFO: Sensor HTML [web]\r\nINFO: Sensor HTML [web] (done) | time=4ms\r\nINFO: ------------- Run sensors on project\r\nINFO: Sensor Zero Coverage Sensor\r\nINFO: Sensor Zero Coverage Sensor (done) | time=2ms\r\nINFO: CPD Executor Calculating CPD for 0 files\r\nINFO: CPD Executor CPD calculation finished (done) | time=0ms\r\nINFO: Analysis report generated in 684ms, dir size=74 KB\r\nINFO: Analysis report compressed in 80ms, zip size=9 KB\r\nINFO: Analysis report uploaded in 65ms\r\nINFO: ANALYSIS SUCCESSFUL, you can browse http://localhost:9000/dashboard?id=5cc5a81df8f52d06ad7a5298aeb5ea2b\r\nINFO: Note that you will be able to access the updated dashboard once the server has processed the submitted analysis 
report\r\nINFO: More about the report processing at http://localhost:9000/api/ce/task?id=AXED3w4aSbhJgxCN4yPY\r\nINFO: Analysis total time: 9.960 s\r\nINFO: ------------------------------------------------------------------------\r\nINFO: EXECUTION SUCCESS\r\nINFO: ------------------------------------------------------------------------\r\nINFO: Total time: 12.300s\r\nINFO: Final Memory: 12M/44M\r\n\r\nIt seems passed at SonarQube\r\n![Capture](https://user-images.githubusercontent.com/16667111/77259541-34337000-6c93-11ea-8816-436171aeb423.PNG)\r\n\r\nLanguage etc seems empty in SQL\r\n![Capture2](https://user-images.githubusercontent.com/16667111/77259595-9b512480-6c93-11ea-8f60-677346a0602c.PNG)\r\n\r\n\r\nThe problem is i could scan it with SonarQube and it found some vulnerabilities.\r\n\r\nWhat is wrong in my case?\r\nThank you for your help.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/59", @@ -22497,6 +23271,7 @@ "node_id": "MDU6SXNzdWU3MDExNjU5NDE=", "title": "Risk Rating Calculator Issue", "body": "It seems there is a mistake there where in the Threat Agent Factors, in the Skill Level , it’s 1 for “Security penetration skills” up to 9 –“No technical skills” where it supposed to be vice versa as it refers to the skills that the Threat Agent has\r\nThe ease of exploit is evaluated separately as a stand alone factor. This factor refers to which skills the threat agent has and the higher the skills he/she has the higher likelihood we get.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/61", @@ -22523,6 +23298,7 @@ "node_id": "MDU6SXNzdWU3MTQwOTY2NTQ=", "title": "mongodb port", "body": "https://github.com/OWASP/RiskAssessmentFramework/blob/master/user-guide.md#step-2\r\n\r\nit only works for me, if i use the (default mongodb) port 27017 instead of 28017. maybe this is a typo?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/62", @@ -22549,6 +23325,7 @@ "node_id": "MDU6SXNzdWU3MTQwOTkyMzM=", "title": "how to use this?", "body": "i don't get how to use this. 
when i upload a zip with my project inside, the api console does something and there are no errors, but the gui shows 0 errors in my code.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/63", @@ -22575,6 +23352,7 @@ "node_id": "MDU6SXNzdWU3NTIxNzc3NjA=", "title": "Does the scan ability depends on SonarQube?", "body": "RiskAssessmentFramework Call SonarQube API, get the result and then display.\r\nDoes the scan ability depends on SonarQube?\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/64", @@ -22601,6 +23379,7 @@ "node_id": "MDU6SXNzdWU3ODU4ODA1NTc=", "title": "Failed to compile: Cannot find module 'node-sass'", "body": "I followed everything according to these steps\r\n\r\nhttps://github.com/OWASP/RiskAssessmentFramework/blob/master/user-guide.md\r\n\r\nWhen I tried to run **`ng serve`** or **`npm start`**, it shows the following error.\r\n\r\n![image](https://user-images.githubusercontent.com/16001925/104580434-64feee80-5685-11eb-859f-d92543d702fb.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/65", @@ -22627,6 +23406,7 @@ "node_id": "MDU6SXNzdWU4Mzc3ODUyODM=", "title": "Error On uploading a file due to cors error", "body": "When i try to upload a zip file with my code then i get the following error \r\n\"Cross-Origin Request Blocked: The Same Origin Policy disallows reading the remote resource at http://localhost:3000/upload/code. (Reason: CORS request did not succeed)\"", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/66", @@ -22653,6 +23433,7 @@ "node_id": "MDU6SXNzdWU4OTkxNTUyODI=", "title": "Potential Security Issue", "body": "👋 Hello, we've received a report for a potential critical severity security issue in your repository.\r\n\r\n#### Next Steps\r\n\r\n1️⃣ Visit **https://huntr.dev/bounties/1-other-OWASP/RiskAssessmentFramework** for more advisory information.\r\n\r\n2️⃣ **[Sign-up](https://huntr.dev/)** to validate or speak to the researcher for more assistance.\r\n\r\n3️⃣ Propose a patch or outsource it to our community.\r\n\r\n---\r\n\r\n#### Confused or need more help?\r\n\r\n- Join us on our **[Discord](https://huntr.dev/discord)** and a member of our team will be happy to help! 🤗\r\n\r\n- Speak to a member of our team: @JamieSlome\r\n\r\n---\r\n\r\n*This issue was automatically generated by [huntr.dev](https://huntr.dev) - a bug bounty board for securing open source code.*", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/67", @@ -22675,10 +23456,11 @@ "pk": 798, "fields": { "nest_created_at": "2024-09-11T21:14:53.723Z", - "nest_updated_at": "2024-09-11T21:14:53.723Z", + "nest_updated_at": "2024-09-13T16:12:49.937Z", "node_id": "I_kwDOB4M0TM5sfPlA", "title": "Cyber", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/RiskAssessmentFramework/issues/73", @@ -22705,6 +23487,7 @@ "node_id": "I_kwDOB4MVSM48sBpL", "title": "In which folder is monitor.py in sudo python monitor.py", "body": "localhost:4200 is valid ,but it shows localhost:5000 not working", + "summary": "Locate the `monitor.py` file within your project directory. Check the structure to identify where it is placed. 
Ensure you are using the correct path when executing `sudo python monitor.py`.\n\nVerify the configuration settings within `monitor.py` to confirm the server is set to run on `localhost:5000`. If it is configured to run on `localhost:4200`, adjust the code or command accordingly.\n\nIf `localhost:5000` is not working, perform the following steps:\n1. Check if another service is using port 5000 by running `lsof -i :5000` in your terminal.\n2. Inspect the logs for errors when starting the server. Look for any stack traces or error messages that could indicate the root cause.\n3. Ensure that the firewall settings on your machine allow traffic on port 5000. Adjust them if necessary.\n4. Test connectivity by running `curl http://localhost:5000` to see if you receive a response.\n\nIf issues persist, consider debugging the script by adding print statements or logging to identify where the execution may be failing.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureTea-Project/issues/335", @@ -22731,6 +23514,7 @@ "node_id": "I_kwDOB4MVSM4889cN", "title": "the commands for server mode in the quick-start guide don't work properly", "body": "In the repository, there are 2 commands for running the app in the server mode.\r\n\r\n[quick start](https://github.com/OWASP/SecureTea-Project#quick-start) : This one saves files of the repo in the `usr\\local\\bin`. Could anyone tell me why?\r\nand \r\n[after installation](https://github.com/OWASP/SecureTea-Project/blob/master/doc/en-US/user_guide.md#starting-up-in-server-mode) : This one runs fine\r\n\r\nPlease specify the difference between the two commands.\r\n\r\n", + "summary": "Investigate the discrepancies between the commands for running the app in server mode as documented in the quick-start guide and the user guide. \n\n1. **Identify the Commands**: Review both the quick-start command and the after-installation command to determine their structure and intended functionality.\n2. **Check File Paths**: Examine why the quick-start command saves files to `usr\\local\\bin`. Confirm if this is intended behavior or if it results from a misconfiguration.\n3. **Debug the Quick-Start Command**: Run the quick-start command in a controlled environment and capture any error messages or logs that may indicate why it fails to operate correctly.\n4. **Compare Outputs**: Compare the outputs of both commands to identify any differences in behavior or results.\n5. **Consult Documentation**: Review the documentation for any notes or comments regarding the differences between the quick-start and installation commands.\n\nBy following these steps, you will clarify the issues surrounding the commands and improve the documentation for users.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureTea-Project/issues/338", @@ -22757,6 +23541,7 @@ "node_id": "I_kwDOB4MVSM5F5fzQ", "title": "the build is failing", "body": "as per : \r\nhttps://app.travis-ci.com/github/OWASP/SecureTea-Project/builds/246126195\r\nbuild is failing due to error in pytest", + "summary": "Investigate the build failure in the SecureTea-Project repository on Travis CI, as detailed in the provided link. The error is occurring during the pytest execution phase. \n\n1. Check the Travis CI logs for specific error messages related to pytest.\n2. Identify any failing tests and review the corresponding test code.\n3. Ensure all dependencies are correctly installed in the testing environment.\n4. 
Run pytest locally to replicate the issue and gather more context on the failure.\n5. Consider updating pytest or other related libraries if compatibility issues are suspected.\n6. Review recent changes to the codebase that may have introduced the error.\n\nBegin by examining the specific errors in the Travis CI output to determine the root cause.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureTea-Project/issues/358", @@ -22783,6 +23568,7 @@ "node_id": "I_kwDOB4MVSM5H5apl", "title": "Using towncrier for managing the CHANGELOG file", "body": "#### Problem Description\r\n\r\n Editing a changelog file doesn't sound too hard, but usually in the end it is not a developer that does this, but GitHub and this go wrong too many times.\r\n\r\nTo avoid problems like this, the pip project has been using towncrier.\r\n\r\n#### Proposal\r\n Basically, you would create a news/1234.bugfix file or a news/42.feature file. You do not touch CHANGES.rst. At release time, the person doing the release calls towncrier, which handles updating CHANGES.rst and removing the news fragments.\r\n\r\n\r\n#### Useful resources\r\n1)https://pypi.org/project/towncrier/\r\n2)https://community.plone.org/t/using-towncrier-for-changes-rst-in-plone-packages/6486/15\r\n3)https://gist.github.com/mauritsvanrees/92c40bc16ceaf3c375d81c995b4552c4", + "summary": "Implement towncrier for managing the CHANGELOG file. \n\n1. Create news fragments for changes: Use the format `news/1234.bugfix` or `news/42.feature` to document changes.\n2. Avoid manual edits to `CHANGES.rst`: Ensure developers do not touch `CHANGES.rst` directly.\n3. Automate the release process: At release time, execute towncrier to update `CHANGES.rst` and remove the processed news fragments.\n\nTo tackle this problem, start by:\n- Installing towncrier using the command: `pip install towncrier`.\n- Creating a directory structure for news fragments.\n- Defining a release process that integrates towncrier commands.\n\nRefer to the provided resources for additional guidance:\n- [Towncrier on PyPI](https://pypi.org/project/towncrier/)\n- [Using Towncrier in Plone packages](https://community.plone.org/t/using-towncrier-for-changes-rst-in-plone-packages/6486/15)\n- [Gist on Towncrier usage](https://gist.github.com/mauritsvanrees/92c40bc16ceaf3c375d81c995b4552c4)", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureTea-Project/issues/361", @@ -22809,6 +23595,7 @@ "node_id": "I_kwDOB4MVSM5PnYVB", "title": "GaussianNB instance not fitted yet", "body": "Hi, am trying out the web application firewall but seem to run into this error. Checked everywhere and am unsure on how to continue.\r\n![image](https://user-images.githubusercontent.com/73624798/184107744-a55204f7-ee2e-4ce4-bee2-0d4773c25cc8.png)\r\n\r\n\r\n", + "summary": "Resolve the \"GaussianNB instance not fitted yet\" error encountered in the web application firewall. This error typically occurs when attempting to use a Gaussian Naive Bayes classifier without fitting it to the training data first.\n\n1. Ensure that you have initialized the GaussianNB instance, for example:\n ```python\n from sklearn.naive_bayes import GaussianNB\n gnb = GaussianNB()\n ```\n\n2. Fit the model to your training data before making predictions:\n ```python\n gnb.fit(X_train, y_train)\n ```\n\n3. Check that `X_train` and `y_train` are correctly defined and contain the appropriate data.\n\n4. 
Confirm that you are calling the prediction method after fitting the model:\n ```python\n predictions = gnb.predict(X_test)\n ```\n\n5. Review your data preprocessing steps to ensure that the training data is properly formatted and contains no missing values.\n\nFollow these steps to address the issue and ensure the GaussianNB instance is properly fitted before making predictions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureTea-Project/issues/371", @@ -22835,6 +23622,7 @@ "node_id": "I_kwDOB4MVSM5e4opA", "title": "running server failing", "body": "**Describe the bug**\r\n\r\nWhen server runned using pyhton it gives many errors\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. install dependencies to run the server\r\n2. run server using python3 run.py\r\n\r\nExpected:\r\nIt should start the server \r\nActual:\r\nGiving error of dependencies", + "summary": "**Fix Server Startup Errors**\n\n1. Verify installation of all required dependencies listed in the `requirements.txt` file. Use `pip install -r requirements.txt` to ensure all packages are correctly installed.\n \n2. Check for compatibility issues between Python version and dependencies. Ensure that the server is compatible with the Python version you are using (preferably Python 3.x).\n\n3. Review the error messages generated when running `python3 run.py`. Identify specific dependencies that are causing the errors and check their documentation for any additional setup required.\n\n4. If errors pertain to missing modules, confirm that they are included in the `requirements.txt` file and reinstall them if necessary.\n\n5. Test the server in a virtual environment to avoid conflicts with globally installed packages. Create a virtual environment using `python3 -m venv venv` and activate it before installing dependencies.\n\n6. If the issue persists, consider checking for open issues in the repository related to server startup errors or consult the community for similar problems.\n\n7. Document any errors and steps attempted to assist in further troubleshooting.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureTea-Project/issues/385", @@ -22861,6 +23649,7 @@ "node_id": "I_kwDOB4MVSM5f9UrO", "title": "System Information not showing in the Web UI", "body": "In the Images below, system information like CPU, RAM, Uptime, Swap cannot be seen.\r\n![Screenshot 2023-03-04 182457](https://user-images.githubusercontent.com/74853624/222924304-5b55fb95-ca35-4b43-8a8b-5268febb9b76.png)\r\nThis is probably due to errors in the API call for obtaining system infromation.\r\n![Screenshot 2023-03-04 182603](https://user-images.githubusercontent.com/74853624/222924330-1c91ee73-f7ba-4800-9015-745f418698e2.png)\r\n", + "summary": "Investigate the issue of missing system information (CPU, RAM, Uptime, Swap) in the Web UI. Confirm that the problem arises from errors in the API call responsible for retrieving this data.\n\n1. Review the API endpoint responsible for fetching system information. Check the request format and parameters to ensure they are correct.\n2. Examine server logs for any error messages or exceptions occurring during the API call.\n3. Test the API call independently using tools like Postman or cURL to verify its functionality and identify any issues.\n4. Check for any recent changes in the codebase that may have affected the API or the Web UI rendering logic.\n5. 
Ensure that the backend service providing system metrics is running and accessible.\n\nImplement fixes based on findings, and test the changes to restore visibility of system information in the Web UI.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureTea-Project/issues/392", @@ -22883,10 +23672,11 @@ "pk": 806, "fields": { "nest_created_at": "2024-09-11T21:15:04.440Z", - "nest_updated_at": "2024-09-12T00:11:03.228Z", + "nest_updated_at": "2024-09-13T16:59:52.088Z", "node_id": "I_kwDOB4MVSM5uB48n", "title": "DisplayUnlockCaptcha ", "body": "### Steps to reproduce\r\n\r\nno longer valid:\r\n\r\nhttps://accounts.google.com/DisplayUnlockCaptcha\r\n\r\nresult:\r\n\r\n> This website is no longer available\r\n>\r\n> To better protect your account, Google no longer allows account access with this website.\r\n>\r\n> If you have trouble signing in, try again from a familiar device or location.\r\n\r\nbut still in the code:\r\n\r\nhttps://github.com/search?q=repo%3AOWASP%2FSecureTea-Project%20DisplayUnlockCaptcha%20&type=code", + "summary": "Update the codebase to remove references to `DisplayUnlockCaptcha`. \n\n1. Verify the current functionality of the application to identify any dependencies on `DisplayUnlockCaptcha`.\n2. Search the entire codebase for occurrences of `DisplayUnlockCaptcha` using GitHub's code search feature.\n3. Remove or refactor the code segments that reference this deprecated functionality.\n4. Test the application thoroughly after modifications to ensure that no related features are broken.\n5. Update the documentation to reflect the removal of `DisplayUnlockCaptcha` and provide alternative solutions for users facing sign-in issues.\n\nBy addressing these tasks, ensure the project remains secure and up-to-date with current Google account access protocols.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureTea-Project/issues/408", @@ -22909,10 +23699,11 @@ "pk": 807, "fields": { "nest_created_at": "2024-09-11T21:15:18.691Z", - "nest_updated_at": "2024-09-11T21:15:18.691Z", + "nest_updated_at": "2024-09-13T16:13:00.493Z", "node_id": "MDU6SXNzdWUzMDIwMjY4MDc=", "title": "Convert pdf into markdown ", "body": "this one https://github.com/OWASP/user-security-stories/blob/master/user-security-stories.pdf", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/user-security-stories/issues/1", @@ -22939,6 +23730,7 @@ "node_id": "MDU6SXNzdWUzMDU1NjI1Njg=", "title": "Map acceptance criteria to ASVS technical validation steps", "body": "This will create a seamless chain of: problem definition -> solution requirement -> validation process. \r\n\r\nhttps://www.owasp.org/index.php/Category:OWASP_Application_Security_Verification_Standard_Project", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/user-security-stories/issues/2", @@ -22961,10 +23753,11 @@ "pk": 809, "fields": { "nest_created_at": "2024-09-11T21:15:27.723Z", - "nest_updated_at": "2024-09-11T21:15:27.723Z", + "nest_updated_at": "2024-09-13T16:13:07.153Z", "node_id": "MDU6SXNzdWU0MDEwNDQwNDk=", "title": "Future of lapse+", "body": "Hi @maldevel,\r\n\r\nFirst I was very happy to see lapse-plus being migrated to OWASP, but unfortunately it's inactive. 
Are there any plans for revival or further development?\r\n\r\nI liked the idea of this tool some years ago and also contributed some minor additions: https://github.com/bergerbd/lapse-plus/commits?author=nyc2\r\n\r\nOr, if there are no future plans, can you recommend an alternative using similar source->sink approach?\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/lapse-plus/issues/1", @@ -22991,6 +23784,7 @@ "node_id": "MDU6SXNzdWUyNjA0MTQwMTI=", "title": "Identifiers inclusion", "body": "Per https://mobile.twitter.com/kingthorin_rm/status/910134747455741957\r\n\r\n> You might also want an array element for arbitrary identifiers. WASC_ID, Testing Guide Refs, Top 10 IDs...etc", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/off/issues/4", @@ -23015,10 +23809,11 @@ "pk": 811, "fields": { "nest_created_at": "2024-09-11T21:15:57.813Z", - "nest_updated_at": "2024-09-11T21:15:57.813Z", + "nest_updated_at": "2024-09-13T16:13:30.528Z", "node_id": "MDU6SXNzdWU2NDg1NzQ4Mjg=", "title": "How does OFF relate to SARIF?", "body": "Seems like this effort is very similar to: https://github.com/sarif-standard\r\n\r\nStatic Analysis Results Interchange Format (SARIF) - A proposed standard for the output format of static analysis tools.\r\n\r\nMaybe join forces with, or simply work on that instead? Ideally, the format would support results from any type of appsec tool, not just static (e.g., SAST, DAST, IAST, and SCA (known CVEs in libraries)).", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/off/issues/7", @@ -23045,6 +23840,7 @@ "node_id": "MDU6SXNzdWU2NTQ3MDMxMzM=", "title": "Poor support for Qualys VM Findings", "body": "Not all fields in Qualys VM findings map well into OFF. \r\n\r\nSample list of findings & DTD attached.\r\n\r\n[qualys.zip](https://github.com/OWASP/off/files/4902711/qualys.zip)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/off/issues/10", @@ -23071,6 +23867,7 @@ "node_id": "MDU6SXNzdWU1NTIzNjIxNDY=", "title": "Code Review 101 - Refactor to allow other modules (Infrastructure as Code)", "body": "Refactor the code review 101 static site to allow adding more modules. First stage is to decouple the definition file and the welcome banner from the code running everything and put it into its own folder. Navigating to that URL should load the respective module. Consider multi-language support. like /codereview/101/en/\r\n", + "summary": "Refactor the Code Review 101 static site to enable the addition of additional modules, specifically for Infrastructure as Code. Begin by decoupling the definition file and the welcome banner from the main codebase. Create a dedicated folder for these components to improve organization. \n\nImplement routing so that navigating to a specific URL (e.g., /codereview/101/en/) loads the corresponding module. Consider adding support for multiple languages by structuring the URL to reflect language preferences.\n\nPossible first steps:\n1. Identify and extract the definition file and welcome banner code from the existing structure.\n2. Create a new directory (e.g., `/modules`) for storing the extracted components.\n3. Update the routing logic to serve the appropriate module based on the URL.\n4. 
Implement a language selection mechanism to handle different language modules.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/21", @@ -23097,6 +23894,7 @@ "node_id": "MDU6SXNzdWU1NTIzNjQyNzA=", "title": "Code Review 101 - Change definition file from JSON to YAML", "body": "Investigate whether the definition JSON could be converted to YAML and parsed on the client side back to JSON. This will facilitate collaboration and easier translation of the content.", + "summary": "Change the definition file format from JSON to YAML. Investigate the feasibility of converting the existing definition JSON files to YAML format. Assess how to implement parsing of the YAML back to JSON on the client side. \n\nTo tackle this problem, follow these steps:\n1. Review the current structure of the JSON definition files.\n2. Identify key features of the JSON that need to be preserved in the YAML format.\n3. Use a library (e.g., PyYAML for Python, js-yaml for JavaScript) to convert JSON files to YAML and vice versa.\n4. Test the conversion process to ensure that all data is accurately represented.\n5. Create a parsing function on the client side that reads the YAML and converts it back to JSON for use in the application.\n6. Update documentation to reflect the change in format and guide users on how to work with YAML files.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/22", @@ -23123,6 +23921,7 @@ "node_id": "MDU6SXNzdWU1NTIzNjYwMzg=", "title": "Code Review 101 - Multi language support", "body": "Language is determined by the URL. For example: /codereview/{module}/{lang}\r\nImplement a drop down menu with flags that would simply navigate to the corresponding module language if it exists.", + "summary": "Implement multi-language support in the Code Review application. Determine the language based on the URL format: `/codereview/{module}/{lang}`. \n\n1. Create a dropdown menu featuring flags representing available languages.\n2. Ensure that selecting a flag navigates to the corresponding module language URL, provided it exists.\n3. Verify the list of available languages for each module and dynamically populate the dropdown.\n4. Implement error handling for cases where a selected language does not exist for a specific module.\n\nBegin by:\n- Analyzing the current routing setup to accommodate language parameters.\n- Designing the dropdown menu UI with appropriate flags.\n- Writing a function to handle the navigation logic based on the selected language.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/23", @@ -23149,6 +23948,7 @@ "node_id": "I_kwDOBgZk1M5Qaedp", "title": "[Leaderboardctrl. js]upgrade of this file caused the dashboard N/A to be empty", "body": "", + "summary": "Investigate the recent upgrade of the `LeaderboardCtrl.js` file that caused the dashboard to display as N/A. Check for any changes in the data fetching logic or rendering processes that may lead to the dashboard being empty. \n\n1. Review the commit history for `LeaderboardCtrl.js` to identify specific changes made during the upgrade.\n2. Examine data binding and API call functions for any alterations that could affect data retrieval.\n3. Debug the component lifecycle methods to ensure that data is being loaded and rendered correctly.\n4. Test the application to reproduce the empty dashboard issue and collect console logs for error messages.\n5. 
Analyze the state management to confirm that the dashboard state is updated correctly after the upgrade.\n\nResolve any identified discrepancies to restore functionality to the dashboard.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/108", @@ -23175,6 +23975,7 @@ "node_id": "I_kwDOBgZk1M56_0TL", "title": "Second Degree Black Belt : XML External Entities environment hostname is not getting set", "body": "While I'm trying to solve the Second Degree Black Belt : XML External Entities challenge, observed that levering the previous challenge i.e. Injection got that there is one 'connecttocommandproc.sh' file when I try to read to content it is observed that the in URL only host2:8080 is mentioned, I think when deploying the hackerden challenge the environment value for host2 and flag-_secret values are not getting set.\r\nCan someone please look into this and help me to get the host2 value,", + "summary": "Investigate the environment variable settings for the Second Degree Black Belt: XML External Entities challenge. Confirm that the hostname for `host2` is properly set in the deployment configuration. \n\n1. Check the deployment scripts or configuration files for any references to `host2` and ensure they are correctly assigning values.\n2. Review the `connecttocommandproc.sh` file to identify how it retrieves the `host2` value. \n3. Look for any environment variable setup in the server or container orchestration tool being used (e.g., Docker, Kubernetes).\n4. If necessary, modify the deployment configuration to include the correct values for `host2` and `flag-_secret`.\n5. Redeploy the challenge environment and verify if the `host2` value is now correctly set.\n\nDebugging this issue will require access to the deployment environment and possibly logs to trace where the values might be failing to set.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/139", @@ -23201,6 +24002,7 @@ "node_id": "I_kwDOBgZk1M5-0wny", "title": "Slack Integration using unmaintained passport-slack", "body": "If you attempt to setup the slack integration you will be met with the error json is undefined due to the project passport-slack no longer being maintained. There is a new project that can be used or there is a fix to use profile instead. https://github.com/mjpearson/passport-slack/issues/31", + "summary": "Replace the unmaintained `passport-slack` with an updated library for Slack integration. Address the error \"json is undefined\" by utilizing the new project or by applying a fix that uses `profile` instead. \n\nTo tackle the problem, follow these steps:\n1. Research and identify a maintained alternative to `passport-slack`, such as `passport-slack-oauth2`.\n2. Update your project's dependencies to use the new library.\n3. Refactor the integration code to align with the new library’s API.\n4. If opting for the fix, locate the relevant code where `json` is called and replace it with `profile`.\n5. Test the integration thoroughly to ensure compatibility and functionality. 
\n\nRefer to the GitHub issue for additional context and community insights: https://github.com/mjpearson/passport-slack/issues/31.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/144", @@ -23227,6 +24029,7 @@ "node_id": "I_kwDOBgZk1M6KKMzx", "title": "The example used for snipXSS3.jsp does not seem correct", "body": "Hi Team,\r\n\r\nCould you please double check the example mentioned under snipXSS3.jsp?\r\n\r\nI believe the `StringEscapeUtils.escapeHtml4` already handles single quotes and It is not necessary to replace the single quote again.\r\n\r\nThanks\r\nDaniel ", + "summary": "Review the example provided in snipXSS3.jsp for accuracy. Confirm whether `StringEscapeUtils.escapeHtml4` properly handles single quotes, making additional replacements redundant. If necessary, remove the extra single quote replacement code.\n\nFirst steps:\n1. Examine the implementation of `StringEscapeUtils.escapeHtml4` to verify its handling of single quotes.\n2. Test the current behavior of the snipXSS3.jsp example with various inputs.\n3. If the function does escape single quotes, update the example by removing the redundant replacement.\n4. Document the changes and test the updated example to ensure it works as intended.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/149", @@ -23253,6 +24056,7 @@ "node_id": "I_kwDOBgZk1M6PZ85u", "title": "Feature request: k8s friendly handling of config.json and localUser.json", "body": "It would be really helpful if the injection of configuration variables can be fully migrated to environment variables. This would allow users to deploy these tools to a k8s environment without having to manually encrypt database, slack, and localUser hash variables. Instead, the tool will rely on injecting these configuration variables via ENV/Config maps. This would also allow users to select their password management solution - like AWS Secrets, K8s secrets or a KMS related solution. Messing around with a node encryption script isn't great", + "summary": "Implement a feature for Kubernetes-friendly handling of `config.json` and `localUser.json`. Transition all configuration variable injections to environment variables to streamline deployment in Kubernetes environments. \n\n1. Eliminate the need for manual encryption of sensitive variables such as database credentials, Slack tokens, and local user hashes.\n2. Utilize ENV/Config maps for injecting configuration variables, enhancing ease of use and security.\n3. Allow users to choose their preferred password management solution, such as AWS Secrets Manager, Kubernetes secrets, or a KMS-related solution.\n4. Remove reliance on custom node encryption scripts.\n\nFirst steps:\n- Review the current configuration injection process.\n- Identify all configuration variables in `config.json` and `localUser.json`.\n- Develop a plan for mapping these variables to environment variables.\n- Implement and test the use of ConfigMaps and Secrets in Kubernetes. 
\n- Update documentation to guide users on the new process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/150", @@ -23275,10 +24079,11 @@ "pk": 821, "fields": { "nest_created_at": "2024-09-11T21:16:13.550Z", - "nest_updated_at": "2024-09-12T03:11:25.865Z", + "nest_updated_at": "2024-09-13T16:25:43.754Z", "node_id": "I_kwDOBgZk1M6RvJHr", "title": "A shortcut for Second Degree Black Belt - Using Components with Known Vulnerabilities & Insecure Deserialization", "body": "Exploit XXE alone to view `/usr/local/tomcat/logs/catalina.2023-10-05.log` on `host2`:\r\n```\r\nargument:\r\n-DSECRET3=/code/getCode.html#eyJhbGciOiJIUz...\r\n```", + "summary": "Add a shortcut for Second Degree Black Belt by leveraging components with known vulnerabilities and insecure deserialization. Focus on exploiting XML External Entity (XXE) injection to access sensitive log files.\n\n1. Identify the target application and confirm that it allows XML input processing.\n2. Craft an XXE payload to include a reference to the log file you want to access: `/usr/local/tomcat/logs/catalina.2023-10-05.log`.\n3. Use the following argument to trigger the exploit:\n ```\n -DSECRET3=/code/getCode.html#eyJhbGciOiJIUz...\n ```\n4. Monitor the application's response for the log content to ensure access was successful.\n\nConsider testing with various XML structures and configurations to maximize the chances of exploiting the XXE vulnerability effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/SecureCodingDojo/issues/154", @@ -23301,10 +24106,11 @@ "pk": 822, "fields": { "nest_created_at": "2024-09-11T21:16:21.820Z", - "nest_updated_at": "2024-09-11T21:16:21.820Z", + "nest_updated_at": "2024-09-13T16:13:43.945Z", "node_id": "I_kwDOBbwtEM6UpsjK", "title": "Add disclaimer for vistors´ photos", "body": "... so that we can use a front page picture for 2025.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/german-owasp-day/issues/47", @@ -23327,10 +24133,11 @@ "pk": 823, "fields": { "nest_created_at": "2024-09-11T21:16:51.512Z", - "nest_updated_at": "2024-09-11T21:16:51.512Z", + "nest_updated_at": "2024-09-13T16:14:07.855Z", "node_id": "MDU6SXNzdWUyNjAzOTA0MDY=", "title": "Your cert expired", "body": "https://owaspsummit.org has an expired TLS cert.\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-summit-2017-site/issues/1", @@ -23357,6 +24164,7 @@ "node_id": "MDU6SXNzdWUyMzYxNTUyNTU=", "title": "Adapt guide to be inclusive of API testing", "body": "Web-based APIs can, for the most part, be testing using this guide. Some elements are naturally client-side and therefore irrelevant to API testing.\r\n\r\nI suggest that we create an article in section 3 or 4.1 that talks about how the guide can be used for API testing.\r\n\r\nI further suggest that we review the existing articles to ensure that the language used is appropriate for web app and APIs alike, and determine where any additions might be needed where testing for the same issue on an API involves a different process.", + "summary": "The issue proposes updating the guide to include information on API testing, noting that while the current guide is largely applicable to web-based APIs, certain client-side elements may not be relevant. 
It suggests creating a new article in section 3 or 4.1 specifically addressing API testing and reviewing existing articles for inclusive language that accommodates both web apps and APIs. Additionally, it highlights the need to identify where different processes may be involved for testing similar issues on APIs. To move forward, it would be beneficial to draft the new article and conduct a thorough review of the current content.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/5", @@ -23389,6 +24197,7 @@ "node_id": "MDU6SXNzdWUyMzYxNTU3ODQ=", "title": "Add guidance on how to write security test cases (or threat verification tests)", "body": "Guidance, to be added to section 5 (reporting), on how a tester can write programmatic test cases as a form of output, that developers can re-run to determine if an issue has been fixed.", + "summary": "The issue requests the addition of guidance on writing security test cases, specifically for threat verification. This guidance should be incorporated into section 5 of the reporting documentation. It aims to help testers create programmatic test cases that developers can easily re-run to verify whether security issues have been resolved. To address this, the documentation needs to outline best practices and examples for formulating these test cases effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/6", @@ -23420,6 +24229,7 @@ "node_id": "MDU6SXNzdWUyMzYxNTYwMzk=", "title": "Add Testing for Deserialisation of Untrusted Data", "body": "https://cwe.mitre.org/data/definitions/502.html", + "summary": "The issue discusses the need to implement testing for the deserialization of untrusted data, highlighting the potential security vulnerabilities associated with this process as outlined by CWE-502. It suggests that proper tests should be established to ensure that the system can handle untrusted input safely and securely. To address this, a strategy for testing deserialization methods should be developed, focusing on identifying and mitigating potential risks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/7", @@ -23452,6 +24262,7 @@ "node_id": "MDU6SXNzdWUyMzYxNTYyNjM=", "title": "Add Testing for XML External Entity (XXE) Weaknesses", "body": "", + "summary": "The issue discusses the need to implement tests for XML External Entity (XXE) vulnerabilities within the project. It highlights the importance of identifying and mitigating these security weaknesses to enhance the overall security posture. Actionable steps could include developing test cases that specifically target XXE scenarios and incorporating them into the existing testing framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/8", @@ -23483,6 +24294,7 @@ "node_id": "MDU6SXNzdWUyODgzOTIwODM=", "title": "Add advice regarding Human Factors", "body": "More dangers than any problems this test might find, are the ones it's left out of scope (e.g. websites without MFA, or MFA without verier impersonation resistance, etc). 
The most expensive security oversight after malware is the people.\r\n\r\nThere needs to be a prevalent explanation to guide users that OWASP is only going to address less than half of their problems, and they need to pay serious consideration to design decisions and overall security effectiveness - not just web vulns.\r\n\r\n*We* all know this, but users of this guide are almost certainly unaware of the other things they need to be thinking about. We owe it to them to point these out (if not also add some tests for them: for example - google-authenticator is useless if a phishing site proxies stolen credentials in real time, and 2FA in general is pointless for transactions in the face of malware, etc.)", + "summary": "The issue highlights the need to incorporate advice on Human Factors into the existing security guidelines. It emphasizes that users may overlook critical risks not addressed by OWASP, such as the absence of multi-factor authentication (MFA) or the potential for MFA to be ineffective against certain threats like phishing. The author suggests that a clear explanation should be included to inform users that OWASP's focus may not cover all security concerns, urging the need for a broader perspective on security design and effectiveness. It is recommended to not only provide guidance but also consider adding tests that reflect these overlooked areas.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/13", @@ -23513,6 +24325,7 @@ "node_id": "MDU6SXNzdWUyOTI0NDM5ODQ=", "title": "Add Testing Integrating / Third Party Services (OTG-CONFIG-011)", "body": "Web application integrating external services must be tested for well-known vulnerabilities caused by misconfigurations.\r\n\r\n- Amazon AWS bucket service: ...\r\n- Social media profile link validation: ...\r\n- CloudFlare / CDN service: ...\r\n- Unclaimed CNAME domains (DNS?): ...\r\n- ...", + "summary": "The issue discusses the need to implement testing for web applications that integrate with external services to identify vulnerabilities stemming from misconfigurations. Specific areas highlighted for testing include Amazon AWS bucket services, social media profile link validation, CloudFlare or CDN services, and unclaimed CNAME domains. To address this issue, it is necessary to establish a testing framework that covers these components to ensure security and proper configuration.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/14", @@ -23543,6 +24356,7 @@ "node_id": "MDU6SXNzdWU0OTIyNDU1NzQ=", "title": "Add an abbreviated Top-10 focused guide", "body": "As a consultant, I run into a lot of smaller groups who aren't looking to pay for the number of hours it takes to work through the full ASVS. This is especially the case when the client has multiple web apps they need to have reviewed, we end up having to submit bids for abbreviated reviews where we focus solely on the Top-10. \r\n\r\nIt would be great if there was a quick and dirty testing guide that could be offered as a bare minimum assessment or even as a way to show that they really should have a full review performed. \r\n\r\nI'd looked into the cheetsheet series, but the Top-10 cheetsheet has been recommended for deletion. 
While I understand the thinking behind the Top-10 being primarily for education purposes rather than a testing guide, in my experience there's a definite need for something like this.\r\n\r\n\r\nWe created a quick work program based off of the 2017 Top-10 that I'd love some feedback on and/or could be used as a jumping-off point for this.\r\n[OWASP-TOP10.xlsx](https://github.com/OWASP/OWASP-Testing-Guide-v5/files/3600989/OWASP-TOP10.xlsx)", + "summary": "The issue discusses the need for a concise testing guide focused on the Top-10 vulnerabilities for smaller groups that may not want to invest in a full ASVS review. The author highlights the challenges faced when bidding for abbreviated reviews and suggests that a bare minimum assessment could help clients recognize the value of a comprehensive review. Although the Top-10 cheatsheet has been suggested for deletion, there is a clear demand for a practical resource. The author has created a preliminary work program based on the 2017 Top-10 and seeks feedback to refine it or to use it as a foundation for further development. \n\nTo proceed, gathering feedback on the provided work program could enhance its effectiveness and utility.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/171", @@ -23576,6 +24390,7 @@ "node_id": "MDU6SXNzdWU1NTA5NTI4OTM=", "title": "Properly order document sections", "body": "**What's the issue?**\r\n\r\nThe WSTG has a legacy layout that doesn't account for some new sections and tests. I think section 4 itself has become an unnecessary layer, and the test types can be surfaced by a step for easier navigation of the guide. Article titles would benefit from the removal of redundant words, like \"Testing of.\"\r\n\r\n**How do we solve it?**\r\n\r\nReorder sections into a more sensible and futureproof format. I propose alphabetical by major sections, then minor articles. This issue is intended to invite feedback and work on the ordering collaboratively.\r\n\r\nFor ease of work, I will submit the [proposed ordering as a TOC file](https://github.com/OWASP/wstg/pull/254/files) (#254), which if approved, should replace \r\n[documet/README.md](https://github.com/OWASP/wstg/blob/master/document/README.md)\r\n\r\nTODO:\r\n\r\n- [x] Agree on chapters layout\r\n- [ ] Assign section numbers\r\n- [ ] Add test identifiers\r\n- [ ] Link-ify section names once reorganizing is complete\r\n", + "summary": "The issue addresses the need to reorganize the sections of the WSTG document to improve clarity and navigation. The current layout is outdated and includes unnecessary layers, particularly in section 4. The suggestion is to reorder the sections alphabetically by major sections followed by minor articles, and to streamline article titles by removing redundant phrases.\n\nTo move forward, feedback is encouraged, and a proposed table of contents (TOC) file has been submitted for review. The next steps include agreeing on the chapter layout, assigning section numbers, adding test identifiers, and linking section names after the reorganization is complete.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/253", @@ -23605,6 +24420,7 @@ "node_id": "MDU6SXNzdWU1NTIxMjA0MjM=", "title": "Map WSTG to ASVS and determine coverage and possible knowledge gaps", "body": "**What would you like added?**\r\nSee how the testing guide content correlates to ASVS controls and determine effective coverage. This will help ASVS users to get more context with the controls. 
This can than also be used in SKF when generating requirements\r\n\r\n**Would you like to be assigned to this issue?**\r\nCheck the box if you will submit a PR to add the proposed content. Please read CONTRIBUTING.md.\r\n- [x] Assign me, please!\r\n", + "summary": "The issue discusses the need to map the Web Security Testing Guide (WSTG) to the Application Security Verification Standard (ASVS) to assess coverage and identify potential knowledge gaps. This mapping aims to provide ASVS users with better context regarding the controls, which could also be beneficial for the Software Knowledge Framework (SKF) in generating requirements. To proceed, the contributor has indicated their willingness to submit a pull request to add the proposed content. \n\nTo move forward, the mapping process should be initiated and documented.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/261", @@ -23620,8 +24436,8 @@ "repository": 1119, "assignees": [], "labels": [ - 182, - 184 + 184, + 182 ] } }, @@ -23634,6 +24450,7 @@ "node_id": "MDU6SXNzdWU1NTIxMjQ1NDY=", "title": "Map OWASP SKF labs to WSTG examples", "body": "**What would you like added?**\r\nOWASP SKF has a lot of different labs with write ups on \"how to test\".\r\nWe can reference these labs in parts of the testing guide\r\n\r\nThe Lab write up can be found here:\r\nhttps://owasp-skf.gitbook.io/asvs-write-ups/\r\n\r\ni.e the following lab:\r\nhttps://owasp-skf.gitbook.io/asvs-write-ups/kbid-46-sqli-union-select\r\n\r\ncould be referenced somewhere here in the testing guide:\r\nhttps://github.com/OWASP/wstg/blob/master/document/4_Web_Application_Security_Testing/4.8_Input_Validation_Testing/4.8.5_Testing_for_SQL_Injection_OTG-INPVAL-005.md\r\n\r\nWould you like to be assigned to this issue?\r\nCheck the box if you will submit a PR to add the proposed content. Please read CONTRIBUTING.md.\r\n- [X] Assign me, please!\r\n", + "summary": "The issue suggests adding references to OWASP SKF labs within the OWASP WSTG testing guide, specifically linking lab write-ups that demonstrate various testing methodologies. An example provided highlights how to integrate a specific lab on SQL Injection into the relevant section of the testing guide. To address this, the proposed action involves reviewing the labs and identifying appropriate places in the WSTG to include these references. A commitment to submit a pull request for these changes is indicated.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/262", @@ -23651,9 +24468,9 @@ 415 ], "labels": [ + 184, 180, - 182, - 184 + 182 ] } }, @@ -23666,6 +24483,7 @@ "node_id": "MDU6SXNzdWU1NzcxMjg0NDc=", "title": "Merge REST Assessment CS into WSTG", "body": "**What would you like added?**\r\nFollowing the issue from the CheatSheets project, [Issue 367](https://github.com/OWASP/CheatSheetSeries/issues/367) discusses the move of [Rest Assessment CS](https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/REST_Assessment_Cheat_Sheet.md) to WSTG\r\n\r\nWhat do you think? If yes, this can modified to fit with the REST testing scenario", + "summary": "The issue proposes merging the REST Assessment Cheat Sheet into the Web Security Testing Guide (WSTG). It references a related discussion in another project and suggests that the content can be adjusted to better fit the REST testing context. 
To proceed, consider reviewing the REST Assessment Cheat Sheet and how it can be integrated into the WSTG framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/351", @@ -23698,6 +24516,7 @@ "node_id": "MDU6SXNzdWU2MzE1NzY1MTA=", "title": "Study of API Testing", "body": "**What would you like to happen?**\r\nAPIs come in different flavors, REST, SOAP, GraphQL, etc.\r\nTo simply say API Testing, that's a little bit ludicrous, and be broken down better. Issue #267 should adapt according to the plan that will be described in this issue.\r\n\r\nWhat do you think? I believe this can handle a call, or maybe trying to plan it out and then combine our ideas in one thread to see what would work best.\r\n\r\nPinging the API Security team to get their feedback on this in order to have a well fleshed out plan\r\ncc: @ErezYalon @PauloASilva @inonshk", + "summary": "The issue discusses the need to categorize API testing more effectively, given the various types such as REST, SOAP, and GraphQL. It suggests that the current approach to API testing is inadequate and should be revised. The author proposes collaborating with others to develop a comprehensive plan, potentially involving input from the API Security team for a more robust strategy. To move forward, it would be beneficial to gather feedback and consolidate ideas on the proposed categorization and testing methodologies.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/492", @@ -23727,6 +24546,7 @@ "node_id": "MDU6SXNzdWU2NDA2Mzg5MDM=", "title": "Update WSTG IDs after Merges", "body": "After #502 , we need to update the ID tables in order to remove INPV-03 and fix the later ones.\r\n\r\nShould this issue be left open till the end of 4.2 and update the tables before release?\r\n\r\nWSTG-INPV-03 was removed and merged into WSTG-CONF-06", + "summary": "The issue discusses the need to update the WSTG ID tables following a recent merge, specifically to remove the outdated ID INPV-03 and correct subsequent entries. There is a suggestion to keep the issue open until the end of version 4.2, allowing for table updates before the release. It is clear that action is required to ensure the ID tables reflect these changes accurately.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/510", @@ -23755,6 +24575,7 @@ "node_id": "MDU6SXNzdWU2NzI2Nzc4ODk=", "title": "Hackerone and Bugcrowd Links", "body": "**What would you like to happen?**\r\nWhat do you think about adding to every test scenario possible bug bounty reports that are relevant and provide value.\r\nOne example would be for file upload XSS since we are updating it: https://hackerone.com/reports/880099\r\n\r\nCC: @rbsec @kingthorin\r\n", + "summary": "The issue suggests enhancing test scenarios by incorporating relevant bug bounty reports to provide additional value. A specific example given is related to file upload XSS. It may be beneficial to review and integrate relevant reports from platforms like Hackerone and Bugcrowd into the testing documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/535", @@ -23784,6 +24605,7 @@ "node_id": "MDU6SXNzdWU3MDE5MTc2OTU=", "title": "Add new section on Domain enumeration and CT Harvesting?", "body": "I think we should add some domain lookup (ala amass etc) and cert transparency harvesting details to the guide. 
\r\n\r\nI'm thinking of adding this to a new section like 4.1.11 but we could fairly reasonably just rename 4.1.1 and include it there (in order to maintain some semblance of workflow/order-of-operations).\r\n\r\nSo I'm looking for:\r\n\r\n1. Thoughts on where to locate it.\r\n2. Tools and other suggestions for content.", + "summary": "The issue proposes the addition of a new section focusing on domain enumeration and Certificate Transparency (CT) harvesting to an existing guide. The author suggests either creating a new section (4.1.11) or renaming the current section (4.1.1) to incorporate this content for better workflow and organization. Feedback is sought on the optimal placement of the new information, as well as recommendations for relevant tools and content to include. \n\nTo address this, consider reviewing the current structure of the guide and proposing a logical location for the new section, along with specific tools or methodologies for domain enumeration and CT harvesting.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/570", @@ -23814,6 +24636,7 @@ "node_id": "MDU6SXNzdWU3MDg0MjQzNzc=", "title": "Re-establish content with regard to testing for race conditions", "body": "I was trying to map **Testing for Race Conditions (OWASP-AT-010)** from OTGv3 ( https://wiki.owasp.org/index.php/Testing_for_Race_Conditions_(OWASP-AT-010) ) to the latest version but I can't find a section talking about race conditions.\r\n\r\nThe only one I found is WSTG-INFO-07 where it mentions this:\r\nRace - tests multiple concurrent instances of the application manipulating the same data.\r\n\r\nBut it lacks most of the content that was previously in OTGv3.", + "summary": "The issue discusses the need to restore content related to testing for race conditions in the latest version, noting that the currently available section (WSTG-INFO-07) does not cover the topic as comprehensively as the previous version (OTGv3). It suggests that more detailed information and guidance on testing for race conditions should be integrated into the current documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/571", @@ -23843,6 +24666,7 @@ "node_id": "MDU6SXNzdWU3MjM5OTg1OTE=", "title": "Add a new test method to Testing for Credentials Transported over an Encrypted Channel", "body": "Add a new test method to Testing for Credentials Transported over an Encrypted Channel that describes an alternative test for cases where forced browsing is not possible.\r\n\r\nPlease reference the linked PR for context.\r\n\r\n> Can we discuss doing using cURL or something similar to it? Browsers can do some magic and force that (based on headers and cache mechanisms). The curl request should return a 3XX straight up with certainty.\r\n\r\n_Originally posted by @ThunderSon in https://github.com/OWASP/wstg/pull/573#discussion_r503214145_", + "summary": "A request has been made to add a new test method to the existing tests for credentials transported over an encrypted channel. This new method should cater to scenarios where forced browsing is not feasible. 
There is a suggestion to utilize cURL or a similar tool for testing, as it can provide a more straightforward and reliable response (3XX status) compared to browser behavior, which can be influenced by various factors like headers and caching.\n\nTo address this, consider implementing a test using cURL that effectively captures the intended behavior in these situations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/587", @@ -23873,6 +24697,7 @@ "node_id": "MDU6SXNzdWU3MjU0NjIwNzU=", "title": "XSS Reorganization", "body": "**What's the current setup?**\r\nReviewing XSS overall in the document, it's split into 2 different chapters, 7 and 11.\r\nAt the end of the day, all XSS will target users on the client-side, even if it was stored.\r\n\r\n**What would you like to happen?**\r\nMove all XSS to chapter 11, client-side testing, and have one big section for XSS and then going into more tests for other types of testing.\r\nWhat do you guys think?\r\n", + "summary": "The issue discusses the organization of content related to Cross-Site Scripting (XSS) in a document, currently divided between chapters 7 and 11. The suggestion is to consolidate all XSS information into chapter 11, which focuses on client-side testing, to create a comprehensive section dedicated to XSS before addressing other testing types. A review of the current structure and potential reorganization is implied.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/591", @@ -23903,6 +24728,7 @@ "node_id": "MDU6SXNzdWU3MjU2MzYxNzE=", "title": "Command Injection Improvements", "body": "**What would you like to happen?**\r\n- [ ] Add additional techniques for command injection\r\n- [ ] Tabularise languages and their possible command exec APIs\r\n- [ ] General refactor and format touchup\r\n", + "summary": "The issue suggests enhancing command injection capabilities by incorporating additional techniques and compiling a table that lists various programming languages alongside their potential command execution APIs. It also calls for a general refactor and formatting improvements throughout the code. To address this, the development team should focus on researching command injection techniques and organizing the information systematically.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/594", @@ -23932,6 +24758,7 @@ "node_id": "MDU6SXNzdWU3MjU3NjUzMjQ=", "title": "Improve Request Smuggling ", "body": "**What would you like to happen?**\r\nThe [test scenario](https://github.com/OWASP/wstg/blob/master/document/4-Web_Application_Security_Testing/07-Input_Validation_Testing/15-Testing_for_HTTP_Splitting_Smuggling.md) can be modified to a newer version based on all the research conducted with more focus on the main pain points of orgs and where it's working mostly.\r\nWe can reference and discuss as well the research conducted by portswigger.", + "summary": "The issue suggests enhancing the existing test scenario for Request Smuggling by updating it to reflect recent research and focusing on the primary challenges organizations face. It also proposes referencing research from PortSwigger to support the modifications. 
The task involves reviewing the current scenario and integrating relevant findings to improve its effectiveness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/596", @@ -23961,6 +24788,7 @@ "node_id": "MDU6SXNzdWU3MjU4MTAyNjc=", "title": "Testing for Incoming Requests INPV?", "body": "**What would you like to happen?**\r\nINPV-16, is this really a test the WSTG should consider? And if so, how is this input validation?\r\nThis feels weird quite honestly. It has to do more with privacy or in how things (\"best practices\") should work.", + "summary": "The issue raises a question regarding the relevance of INPV-16 as a test in the Web Security Testing Guide (WSTG). The author expresses uncertainty about whether this test pertains to input validation, suggesting it may be more related to privacy or best practices. It invites clarification on the necessity and categorization of this test within the guide. To address this, a review of INPV-16's alignment with input validation standards and its implications for privacy best practices may be needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/597", @@ -23990,6 +24818,7 @@ "node_id": "MDU6SXNzdWU3MjU4Mjk5MTM=", "title": "Merge of Sensitive Information and Credentials Transport in Clear Text", "body": "**What would you like to happen?**\r\nMerge ATHN-01 and CRYP-03 as they represent in their entirety the same thing, just different terms (credentials versus sensitive info).", + "summary": "The issue suggests merging two topics, ATHN-01 and CRYP-03, as they both address the same concept but use different terminology—credentials versus sensitive information. To address this, a review and consolidation of the content from both topics into a single, unified entry is needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/598", @@ -24020,6 +24849,7 @@ "node_id": "MDU6SXNzdWU3NDQ2MDM3MjQ=", "title": "Add \"CRE_ID\": \"\" in JSON checklist", "body": "Need to update the current JSON checklist with `CRE_ID`\r\n\r\n- `CRE_ID` can be added once it is finalized", + "summary": "The issue requires the addition of a `CRE_ID` field in the existing JSON checklist. This field should be included once its value is finalized. The next step is to determine the final value for `CRE_ID` before making the update.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/623", @@ -24051,6 +24881,7 @@ "node_id": "MDU6SXNzdWU3NTExNTY2MzM=", "title": "Should SSRF/LFI/RFI be relocated for v5?", "body": "See discussion on this PR: https://github.com/OWASP/wstg/pull/634", + "summary": "There is a debate on whether to relocate the SSRF (Server-Side Request Forgery), LFI (Local File Inclusion), and RFI (Remote File Inclusion) sections in the v5 version of the project. The discussion is ongoing in a linked pull request. 
It appears that further evaluation of the implications and potential benefits of relocating these sections is needed to reach a consensus on the best approach.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/637", @@ -24066,9 +24897,9 @@ "repository": 1119, "assignees": [], "labels": [ + 185, 179, - 182, - 185 + 182 ] } }, @@ -24081,6 +24912,7 @@ "node_id": "MDU6SXNzdWU3NTE4Mzk2MzQ=", "title": "Checklists and Merged IDs", "body": "Should we keep the IDs as normal for tests that are now merged or removed?\r\n\r\nMaybe we can set a placeholder instead, or have a special value that we can parse for later terms.\r\n\r\n`WSTG-INFO-09` is set twice, I am guessing there's a typo, just making sure it wasn't set specifically.", + "summary": "The issue raises a question about whether to retain IDs for tests that have been merged or removed, suggesting the possibility of using a placeholder or a special value for future parsing. Additionally, it points out a potential duplication of the ID `WSTG-INFO-09`, seeking clarification on whether this is a typo or intentional. To address this, a review of the ID management strategy may be necessary, as well as a check for any typographical errors in the ID assignments.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/640", @@ -24096,9 +24928,9 @@ "repository": 1119, "assignees": [], "labels": [ + 186, 179, - 182, - 186 + 182 ] } }, @@ -24111,6 +24943,7 @@ "node_id": "MDU6SXNzdWU3NTY5MzMxMDY=", "title": "Add a test for Content Security Policy headers", "body": "I haven't found any test that includes looking at the Content Security Headers.\r\nI would expect this to be included either in \r\n\r\n- `02-Configuration_and_Deployment_Management_Testing` or in \r\n- `11-Client-side_Testing` where it is closely related to `02-Testing_for_JavaScript_Execution.md`\r\n\r\nTypically, we should check for unsafe-eval and other potentially dangerous settings.\r\n\r\n", + "summary": "The issue highlights the absence of tests for Content Security Policy (CSP) headers, suggesting that it would be beneficial to include such tests in either the `02-Configuration_and_Deployment_Management_Testing` or `11-Client-side_Testing` sections. The focus should be on checking for unsafe-eval and other potentially hazardous settings in the CSP. Adding these tests will enhance security assessments related to JavaScript execution.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/654", @@ -24141,6 +24974,7 @@ "node_id": "MDU6SXNzdWU3NjYzNDkyMjk=", "title": "Chapter 4.2.2 removing gray-box testing questions for logging", "body": "**What's the issue?**\r\nAlso referring to issue: \r\nhttps://github.com/OWASP/wstg/issues/592\r\n\r\nI was looking at chapter 4.2.2 but there is also a lot items with regard to \"Logging\"\r\nin this chapter. I can imagine you do not want this removed so how can i best approach this?\r\n\r\n**How do we solve it?**\r\nI want to open a discussion on how this page should ideally look like\r\n\r\nWould you like to be assigned to this issue?\r\nCheck the box if you will submit a PR to fix this issue. Please read CONTRIBUTING.md.\r\n- [X] Assign me, please!\r\n", + "summary": "The issue discusses the presence of gray-box testing questions related to logging in chapter 4.2.2, which may need reevaluation. The author seeks to initiate a discussion on how to best structure this section while retaining relevant logging content. 
It is suggested that contributors consider how to approach the revisions and potentially submit a pull request for the changes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/685", @@ -24170,6 +25004,7 @@ "node_id": "MDU6SXNzdWU3NzE1NzI1NTM=", "title": "Browser Storage Methods and Implications", "body": "Following on #696 , a discussion erupted on browser storage methods and [CLNT-12](https://github.com/OWASP/wstg/blob/master/document/4-Web_Application_Security_Testing/11-Client-side_Testing/12-Testing_Browser_Storage.md).\r\n\r\nThe discussion mainly is about different storage techniques, and their security implications. Should the WSTG handle that or not, and if it should, how should it do so?", + "summary": "The issue revolves around the need to address various browser storage methods and their associated security implications in the WSTG documentation. There is a debate on whether the WSTG should include guidance on this topic and, if so, how to effectively present that information. A potential next step would be to clarify the scope of the WSTG concerning browser storage techniques and propose a structured approach to integrate this content.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/700", @@ -24198,6 +25033,7 @@ "node_id": "MDU6SXNzdWU3Nzc0MzYzMDM=", "title": "Enhance WSTG-BUSL-09 - Upload of Malicious Files", "body": "**What's the issue?**\r\nThis is an enhancement request. *Test Upload of Malicious Files* can be enhanced through following suggestions.\r\n\r\n- [ ] 1. Filter Evasion : Add magic byte based evasion to bypass weak content validation\r\n- [ ] 2. Filter Evasion: Add metadata based malicious payloads, which can be triggered if uploaded file is included by the web server.\r\n- [ ] 3. Malicious File Contents - Other File Formats : Add attacks on SVG(XXE), HTML(XSS), GIF(XSS)\r\n- [ ] 4. Malicious File Contents - Zip : Add attack on Zip Slip\r\n- [ ] 5. Malicious File Contents : Add a new sub section on upload of configuration file such as .htaccess, web.config etc\r\n- [ ] 6. Tools - Add burpsuite extension - Upload Scanner - https://portswigger.net/bappstore/b2244cbb6953442cb3c82fa0a0d908fa\r\n\r\n\r\n**How do we solve it?**\r\nContent needs to be updated to accommodate these enhancements.\r\n\r\nWould you like to be assigned to this issue?\r\nCheck the box if you will submit a PR to fix this issue. Please read CONTRIBUTING.md.\r\n- [x] Assign me, please!\r\n", + "summary": "The issue is a request for enhancements to the WSTG-BUSL-09 guideline, specifically regarding the testing of malicious file uploads. Suggested improvements include adding various evasion techniques, expanding attack scenarios for different file formats (like SVG, HTML, GIF, and ZIP), and incorporating new sections on specific types of configuration files. Additionally, there is a recommendation to include a Burp Suite extension for enhanced upload scanning. 
To address this issue, the content of the guidelines needs to be updated to integrate these suggestions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/705", @@ -24228,6 +25064,7 @@ "node_id": "MDU6SXNzdWU3Nzc0Mzg3NjQ=", "title": "WSTG-CONF-03 - File Extensions Handling for Sensitive Information; sub section File Upload is ambiguous", "body": "**What's the issue?**\r\nThe subsection of WSTG-CONF-03 *File Extensions Handling for Sensitive Information*, **File Upload** is ambiguous and has content overlapping with *Test Upload of Malicious Files*. From the test objective, idea of this test case is to identify existing sensitive files in the web server via dirbusting. \r\n\r\nHowever section \"File Upload\" which talks about 8.3 file uploads, which looks more appropriate in WSTG-BUSL-09 or WSTG-BUSL-08. If the idea is to identify 8.3 files via dirbusting, I think writeup should be modified to be more precise. \r\n\r\n**How do we solve it?**\r\nRephrase or move the \"File Upload\" sub section to business logic testing. This is a minor change. I would like to wait for thoughts of moderators before making a PR.\r\n\r\nWould you like to be assigned to this issue?\r\nCheck the box if you will submit a PR to fix this issue. Please read CONTRIBUTING.md.\r\n- [ ] Assign me, please!\r\n", + "summary": "The issue concerns the ambiguity in the WSTG-CONF-03 section on *File Extensions Handling for Sensitive Information*, specifically regarding the *File Upload* subsection. It overlaps with the *Test Upload of Malicious Files* section and does not fit well within its current context. The suggestion is to either rephrase this subsection or relocate it to a more appropriate category, such as business logic testing. A minor change is proposed, pending feedback from moderators before proceeding with a pull request. \n\nTo resolve this, consider rephrasing or moving the relevant content to enhance clarity.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/706", @@ -24257,6 +25094,7 @@ "node_id": "MDU6SXNzdWU4MzMzNzg4MzQ=", "title": "Add a section for checking if the server responds with sensitive information.", "body": "**What would you like added?**\r\nWe sometimes see that developers might just serialize an entire object into JSON and returns it in a API response. Sometimes this object contains sensitive information. I personally have seen passwords and API keys being displayed. I think this will fall under the new 4.12 API Testing section.\r\n\r\nWould you like to be assigned to this issue?\r\nI might later. If someone beats me to it that will be good.\r\n", + "summary": "The issue suggests adding a section focused on checking whether the server responses contain sensitive information, such as passwords or API keys, due to improper serialization of objects into JSON. This should be incorporated into the new 4.12 API Testing section. A potential action would be to create guidelines or checks to prevent sensitive data from being exposed in API responses.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/727", @@ -24286,6 +25124,7 @@ "node_id": "MDU6SXNzdWU4MzMzNzk5Mzg=", "title": "Add a section for checking if the web application properly hashes passwords before storing them in the backend.", "body": "**What would you like added?**\r\nSometimes web applications do not hash their passwords before storing them. 
It **should** be rare these days but I've come across this a few times and could not find an appropriate WSTG item that matches this finding. This seems like a very important thing to be checking for especially if you have access to the source code of the application.\r\n\r\n", + "summary": "The issue highlights the need for a new section in the guidelines that addresses the importance of verifying whether web applications properly hash passwords before storing them in the backend. It points out that while it's becoming less common, there are still instances where applications fail to implement this crucial security measure. The suggestion is to create an appropriate WSTG item to ensure that this aspect is checked, particularly when source code access is available. To address this, a new guideline or checklist item should be developed focusing on password hashing practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/728", @@ -24315,6 +25154,7 @@ "node_id": "MDU6SXNzdWU4NDg3MzY0ODM=", "title": "Cryptography - encryption vs signatures", "body": "**What's the issue?**\r\n[Section 4.9.4](https://github.com/OWASP/wstg/blob/master/document/4-Web_Application_Security_Testing/09-Testing_for_Weak_Cryptography/04-Testing_for_Weak_Encryption.md) (Testing for Weak Encryption) includes a few notes about digital signatures. \r\n\r\nI'm curious if the section is meant to include signature information, since to me (a novice) encryption and signatures are two different but related aspects of cryptography.\r\n\r\nHow I noticed this is the Basic Security Checklist has a recommendation for \"asymmetric encryption\" methods and _separately_ (subsequent bullet), a note about using RSA for signatures. \r\n\r\nI can't tell if the first bullet is *only* about encryption, or if would apply to signatures as well.\r\n\r\n**How do we solve it?**\r\nIf the section is in fact only talking about encryption and not signatures, would it be wise to add a section about signatures?\r\n\r\nOtherwise, if it meant to cover both, maybe some clarity/additions around acceptable signature algorithms would be good.\r\n\r\n**Would you like to be assigned to this issue?**\r\n\r\n- [X] Assign me, please!\r\nWith some guidance as to what the intent of that section is, I could do a PR.\r\n", + "summary": "The issue raised concerns the clarity of Section 4.9.4 on digital signatures in the context of weak encryption. The author is uncertain whether the section exclusively addresses encryption or if it also encompasses signatures, noting that the Basic Security Checklist treats them as separate topics. The suggestion is to either add a dedicated section on signatures if they are not covered or to enhance the existing content with information on acceptable signature algorithms. A pull request could be prepared once the intent of the section is clarified.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/734", @@ -24344,6 +25184,7 @@ "node_id": "MDU6SXNzdWU5Mzg5NzkyNjQ=", "title": "Merge IDNT-04 & IDNT-05?", "body": "Hey! Please explain why WSTG-IDNT-05 is separated from WSTG-IDNT-04? Is a strict account format in a web application (eg John Doe - j.doe) is a vulnerability?", + "summary": "The issue raises a question about the separation of two items, WSTG-IDNT-05 and WSTG-IDNT-04, and seeks clarification on whether a strict account format, such as \"John Doe - j.doe,\" is considered a vulnerability in web applications. 
It suggests that there may be a need to discuss the rationale behind the distinct listings and possibly reassess their categorization or relationship. Further investigation into the criteria for identifying vulnerabilities related to account formats may also be warranted.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/764", @@ -24373,6 +25214,7 @@ "node_id": "MDU6SXNzdWU5NDg1ODYyNzk=", "title": "Adding prototype parameter pollution", "body": "Is it useful to add a section on prototype pollution (not to be confused with http parameter pollution)? This vulnerability is a couple of years old but I cant find any reference in any OWASP project to it. It has one very interesting instance in a Kibana RCE exploit ([CVE-2019-7609](https://research.securitum.com/prototype-pollution-rce-kibana-cve-2019-7609/)). \r\n\r\nIf you feel this is missing I can give it a go to write something. Please let me know where it might fit in.\r\n\r\n[Portswigger reference](https://portswigger.net/daily-swig/prototype-pollution-the-dangerous-and-underrated-vulnerability-impacting-javascript-applications)", + "summary": "The issue discusses the potential addition of a section on prototype pollution to an OWASP project, emphasizing that this vulnerability has been overlooked despite its relevance, particularly in relation to a notable Kibana RCE exploit. The author expresses willingness to contribute content if there is consensus on its inclusion and seeks guidance on where it would be appropriate to fit into the existing documentation. A review of current content and a decision on the structure may be needed to address this gap.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/775", @@ -24401,6 +25243,7 @@ "node_id": "MDU6SXNzdWU5NzQ1OTE5NDc=", "title": "Adding section to address JavaScript Service Workers", "body": "Would it be feasible to add a section on JS Service Workers (and the methodolgy for analysis of JS Service Workers) to the guide ? This would focus mostly on client side testing but would also cover push and pull mechanisms. \r\n\r\nWould you like to be assigned to this issue?\r\nCheck the box if you will submit a PR to add the proposed content. Please read CONTRIBUTING.md.\r\n- [x] Assign me, please!\r\n", + "summary": "The issue suggests adding a section to the guide specifically focused on JavaScript Service Workers, including methodologies for their analysis. The proposed content would primarily address client-side testing while also discussing push and pull mechanisms. The requester expresses willingness to be assigned to this issue and intends to submit a pull request to implement the changes. \n\nTo move forward, the necessary content should be drafted and aligned with the existing guide structure.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/787", @@ -24431,6 +25274,7 @@ "node_id": "MDU6SXNzdWU5ODAxNjQwODE=", "title": "Adding file producer metadata leakage", "body": "**File producer metadata leakage**\r\nIf the web application generates files (e.g. pdf), using exiftools (or other techniques), the Producer can be found which created it. If the producer is known, e.g. 
`Producer: iText 2.1.7` or `Producer: mPDF 7.1.7` the attacker can discover whether any CVEs exist for such a tool leading to successful exploitation.\r\n\r\nAlthough I was able to find https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/01-Information_Gathering/, but it doesn't reference this specific need in my opinion. Therefore, I'd like to extend the Information Gathering with a new content.\r\n\r\nWould you like to be assigned to this issue?\r\nno", + "summary": "The issue discusses the risk of file producer metadata leakage in web applications that generate files, such as PDFs. It highlights that identifiable producer information (e.g., `iText 2.1.7`, `mPDF 7.1.7`) can expose the application to vulnerabilities if known CVEs exist for those tools. The author suggests that the existing information gathering resources, like those from OWASP, do not adequately cover this specific concern and proposes extending the content to address it. \n\nTo address this issue, it may be beneficial to create detailed guidelines or best practices for handling file metadata securely to prevent potential exploitation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/788", @@ -24459,6 +25303,7 @@ "node_id": "I_kwDOBXDIEs5N-4Bb", "title": "Inappropriate content (Testing for Cross Site Script Inclusion)", "body": "`4.11.13 Testing for Cross Site Script Inclusion` is not a WEB vulnerability, is a Web Browser application vulnrability. The WSTG is a framework for WEB aplications testing and 4.11 is for client side applications but no client applications. This vulnerability only can affect to specific browsers like as ie6, in this case you need include all other cve vulnerabilities or all other browsers like as how to broken the origin policy from ie8.\r\n\r\nThis document shouldn't be.", + "summary": "The issue raises a concern regarding the classification of the vulnerability described in section `4.11.13 Testing for Cross Site Script Inclusion`, arguing that it pertains to web browser applications rather than web applications themselves. It suggests that the testing framework should focus on vulnerabilities specific to web applications. The author believes that the section should be revised or removed, as it pertains to browser-specific vulnerabilities, which could lead to confusion. To address this, a reassessment of the content and scope of this section is necessary to ensure it aligns with the framework's intent.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/954", @@ -24488,6 +25333,7 @@ "node_id": "I_kwDOBXDIEs5WAo7o", "title": "Upload a malicious symlink in a zip file", "body": "I was checking [this](https://hackerone.com/reports/1439593) HackerOne report with a $29000 bounty and I found it very interesting. This is different than Zip Slip. In case of Zip Slip we can inject `..` in the file path so we can extract our file in a wrong place. In this report, the attacker crafts a malicious symlink to `/etc/passwd` when the backend extracts it `untar_zxf` function only changes the permissions and extract the symlink as is, so the attacker was able to read the `passwd` file! \r\nI believe we need to add this technique to the WSTG!", + "summary": "The issue discusses a vulnerability related to uploading malicious symlinks in zip files, drawing attention to a specific HackerOne report that highlights an attacker’s ability to read sensitive files like `/etc/passwd`. 
This is distinct from the Zip Slip vulnerability, as it involves the extraction of symlinks without proper permission checks. The suggestion is to include this technique in the Web Security Testing Guide (WSTG) to address the security concern. It may be necessary to review and enhance the guidelines for handling symlinks during file extraction processes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/991", @@ -24517,6 +25363,7 @@ "node_id": "I_kwDOBXDIEs5aHdGw", "title": "Review and update content about SameSite cookies", "body": "There are few areas of the guide that don't really take into account the current state of `SameSite` cookies:\r\n\r\n* The Testing Cookie Attributes guide says that \"most\" browsers\" default to `Lax`, but this isn't true for Firefox or Safari.\r\n* The Cross-Site Request Forgery guide doesn't mention it at all, and the PoCs given won't work in *some* browsers because of it.\r\n* The Cross-Site Script Inclusion guide doesn't mention it at all, and the PoC given won't work in *some* (all? #954) browsers because of it.\r\n\r\nThere might also be some other areas that I've missed?", + "summary": "The issue highlights the need to review and update the documentation regarding SameSite cookies, as certain guides contain outdated or inaccurate information. Specific concerns include:\n\n- The Testing Cookie Attributes guide incorrectly states that \"most\" browsers default to `Lax`, while this is not true for Firefox or Safari.\n- The Cross-Site Request Forgery guide fails to mention SameSite cookies, leading to potential issues with the provided Proofs of Concept (PoCs).\n- The Cross-Site Script Inclusion guide also neglects to address SameSite cookies, impacting the functionality of its PoCs in various browsers.\n\nIt is suggested to thoroughly review these guides and potentially others for accuracy regarding SameSite cookie implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1005", @@ -24547,6 +25394,7 @@ "node_id": "I_kwDOBXDIEs5aRES0", "title": "Adding sections (description, impact...) for reports", "body": "Hello,\r\n\r\n**Context:**\r\nI work as a penetration tester and when we finish an audit we have to hand in a report. In such report we aim a lesser technical profile (managers) and a more technical profile (developers or other pentesters). When writing the details of each vulnerability, we are expected to provide a description, impact, exploitation steps, remediation recommendations, references...\r\n\r\n**The issue:**\r\n[Here ](https://owasp.org/www-project-web-security-testing-guide/stable/5-Reporting/README) in section 3.2 Finding details, the guide recommends what to report (title, impact, description and so on). However, **these sections are not present in the vulnerabilites themselves**: taking [IDOR](https://owasp.org/www-project-web-security-testing-guide/stable/4-Web_Application_Security_Testing/05-Authorization_Testing/04-Testing_for_Insecure_Direct_Object_References) as an example, while it's true that there is a summary section, there is no high-level description, no impact, no remediation recommendations and so on.\r\n\r\n**What I request:**\r\nAfter being in a couple of companies, I can see that each of them uses an internal database with such sections, **so that pentesters dont need to write them every single time they report**. Could more sections be added?\r\n\r\nI can make the work if you let/ want me to. 
The idea would be to add to each vulnerability at least:\r\n+ High level description\r\n+ Impact taking the CIA triad in consideration\r\n+ Remediation recommendations\r\n\r\n\r\nThanks and keep it up with the amazing kob =)", + "summary": "The issue discusses the lack of comprehensive sections in the vulnerability reports provided in the OWASP Web Security Testing Guide. It highlights the need for additional details such as high-level descriptions, impact assessments, and remediation recommendations for each vulnerability, which are crucial for different audience profiles in penetration testing reports. The author suggests that these sections could streamline the reporting process for pentesters by reducing repetitive work. They offer to assist in adding these sections to enhance the guide. \n\nTo address this issue, it would be beneficial to consider implementing the proposed sections for each vulnerability in the guide.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1006", @@ -24575,6 +25423,7 @@ "node_id": "I_kwDOBXDIEs5a0XrY", "title": "Adding Test for Outdated and Unsupported Components", "body": "**What would you like added?**\r\n- Add Test for Vulnerable Technologies Using\r\n\r\nWould you like to be assigned to this issue?\r\n- [x] Assign me, please!\r\n", + "summary": "The issue requests the addition of tests to identify outdated and unsupported components within the project, specifically focusing on vulnerable technologies. It is suggested that a systematic approach be implemented to assess the technology stack for potential security risks. Assignment for this task has been indicated.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/wstg/issues/1017", @@ -24606,6 +25455,7 @@ "node_id": "I_kwDOBXDIEs5mo_tx", "title": "WSTG-INFO include up-to-date tooling and examples", "body": "**What would you like added?**\r\nSome more modern tools should be included.\r\nFor example: https://twitter.com/ReconOne_bk/status/1660972532395065345\r\n- waybackurls\r\n- unfurl\r\n- gau\r\n\r\nI'm sure I have other tweets bookmarked, I'll add them as I find them :wink:", + "summary": "The issue suggests updating the WSTG-INFO section by including more modern tools and examples. Specifically, it mentions tools like \"waybackurls,\" \"unfurl,\" and \"gau\" as additions. The contributor plans to share more tools in the future. To address this issue, it would be beneficial to review and incorporate these suggested tools into the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1058", @@ -24636,6 +25486,7 @@ "node_id": "I_kwDOBXDIEs5qbHOn", "title": "Possible error in 4.4.6 Testing for Browser Cache Weaknesses", "body": "**What's the issue?**\r\nIn 4.4.6 Testing for Browser Cache Weaknesses in the paragraph Browser History it says:\r\n\"The Back button can be stopped from showing sensitive data. This can be done by:\r\n\r\n Delivering the page over HTTPS.\r\n Setting Cache-Control: must-revalidate\r\n\"\r\nIn a recent penetration test I had the issue that by using the back button after logging out I could retrieve the information previously shown again. I then modified the headers in the response to see if the advise from above works. At least in Firefox it did not, for other browsers I did not test it. 
After setting Cache-Control: must-revalidate the information could still be retrieved in the described way.\r\n\r\n**How do we solve it?**\r\nAccording to https://stackoverflow.com/questions/49547/how-do-we-control-web-page-caching-across-all-browsers the correct setting for the headers are:\r\n\r\nCache-Control: no-cache, no-store, must-revalidate\r\nPragma: no-cache\r\nExpires: 0\r\n\r\nAt least in my tests I could confirm that these headers resolved the issue.\r\n\r\nWould you like to be assigned to this issue?\r\nCheck the box if you will submit a PR to fix this issue. Please read CONTRIBUTING.md.\r\n- [ ] Assign me, please!\r\n", + "summary": "The issue discusses a potential error in the guidance provided for securing sensitive data when using the back button in browsers, specifically in section 4.4.6 regarding Browser Cache Weaknesses. The original recommendation of using \"Cache-Control: must-revalidate\" did not prevent retrieval of sensitive information after logging out, as tested in Firefox. The user suggests that the headers should be updated to \"Cache-Control: no-cache, no-store, must-revalidate\", along with \"Pragma: no-cache\" and \"Expires: 0\" to effectively address the problem. A resolution involves updating the documentation to reflect these recommended header settings.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1077", @@ -24665,6 +25516,7 @@ "node_id": "I_kwDOBXDIEs5th0vW", "title": "Ambiguity in the summary related to the test case Testing for Bypassing Authorization Schema", "body": "These 2 lines are mentioned in the summary of the Testing for Bypassing Authorization Schema test case, but the access level has no meaning if the user has no session and no authentication.\r\n\r\n1- Is it possible to access that resource even if the user is not authenticated?\r\n\r\n2- Is it possible to access that resource after the log-out?\r\n\r\nIn your opinion, 2 items include Testing for Bypassing Authentication Schema test case?\r\nPlease explain to me how access after logout or without authentication can be included in this test case while this test case examines the level of access of authenticated users horizontally and vertically?\r\nThis seems to be in conflict with the Direct page request in Testing for Bypassing Authentication Schema", + "summary": "The issue highlights confusion regarding the summary of the \"Testing for Bypassing Authorization Schema\" test case, particularly concerning the relevance of access levels for unauthenticated users and those who have logged out. It questions whether the ability to access resources without authentication or after logging out should be considered in the context of this test case, which is primarily focused on the access levels of authenticated users. The author suggests that this perspective may conflict with the criteria for evaluating direct page requests within the same testing framework. \n\nTo address this ambiguity, it may be necessary to clarify the distinctions between authorization and authentication in the test cases and possibly revise the summary to align with the intended focus on authenticated user access levels.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1089", @@ -24694,6 +25546,7 @@ "node_id": "I_kwDOBXDIEs52yO07", "title": "Adding \"How to Test\" for the WSTG Checklist [Work in Progress]", "body": "The WSTG checklist is of utmost importance to penetration testers because it provides the blueprint for what to test. 
Its current format provides the following:\r\n\r\n**ID, Test Name, Objectives, Status, Notes.** \r\n\r\nID, Test Name, and Objectives have been compiled from WSTG. \r\n\r\nWSTG is a companion and reference manual that I go to often for the pentest I do for my clients. For each test, it also has a section called \"How to Test\" which provides a few actionables for the specific vulnerability.\r\n\r\nI propose to include this section (as a new column) in the checklist. The new format will be:\r\n\r\n**ID, Test Name, Objectives, How to Test, Status, Notes.** \r\n\r\nI know it might be tricky because for some tests, the \"How to Test\" is very extensive, while for some it is very succinct. However, I take on the challenge of making a uniform \"How to Test\" in the checklist across all tests. \r\n\r\nEach \"How to Test\" entry will be 3-5 short bullet points, as in the image below:\r\n\r\n![image](https://github.com/OWASP/wstg/assets/17044663/9755e488-950d-43c9-bd36-8af9d1c4b119)\r\n\r\nWhat do you all think about this?\r\n", + "summary": "The issue discusses the proposal to enhance the WSTG checklist by adding a \"How to Test\" column, which would provide actionable steps for specific vulnerabilities. Currently, the checklist includes columns for ID, Test Name, Objectives, Status, and Notes. The new format aims to standardize the \"How to Test\" entries into concise bullet points, acknowledging the challenge of varying lengths of existing content. Feedback on this proposal is sought, and contributors may need to consider how to create uniform entries for all tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1109", @@ -24722,6 +25575,7 @@ "node_id": "I_kwDOBXDIEs54fpVP", "title": "Invisible code parts - bright blue text on the blue background", "body": "**What's the issue?**\r\nText color used in combination with the background color makes it almost invisible.\r\n\r\n**How do we solve it?**\r\nUse other color (than presented at the screenshots below) for these code parts - not bright blue. \r\n\r\nWould you like to be assigned to this issue?\r\nNo\r\n\r\n# Repro steps:\r\n1. Go to https://owasp.org/www-project-web-security-testing-guide/v41/4-Web_Application_Security_Testing/07-Input_Validation_Testing/18-Testing_for_Server_Side_Template_Injection\r\n2. Scroll down to see code parts (examples)\r\n\r\n# Screenshots ❌\r\n![image](https://github.com/OWASP/wstg/assets/18367606/252727db-a094-4cf9-9eca-5c0c527dd092)\r\nor\r\n![image](https://github.com/OWASP/wstg/assets/18367606/2b687db8-5e9f-4316-90e0-1d29ac9e971b)\r\n\r\n# Additional information\r\nPlease notice that current 'effect of colors' make it unuseful - better result is even if we use white text with uniformed background:\r\n![image](https://github.com/OWASP/wstg/assets/18367606/4aa583a1-8ac2-4e60-bcb5-a9f77aaa6f12)\r\n\r\n\r\nBest wishes,", + "summary": "The issue identified involves the text color used for code parts on a webpage, which is bright blue on a blue background, rendering it nearly invisible. To resolve this, it is suggested that a different color be chosen for the code parts to enhance visibility. \n\nTo replicate the issue, one can visit a specific URL and scroll down to observe the code examples. The report indicates that even using white text on a uniform background would significantly improve readability. 
A color change is needed to rectify the visibility problem.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1116", @@ -24751,6 +25605,7 @@ "node_id": "I_kwDOBXDIEs56hK9G", "title": "Check List Translation to french and arabic", "body": "**Get WSTG checklist for the french and arabe Communities**\r\nI'm good at english and i conducts my audits in english, i needed a french version of the checklist to joint it to a report", + "summary": "The issue requests the translation of the WSTG checklist into French and Arabic to accommodate non-English speaking communities. The user emphasizes the need for a French version to include in a report. It may be helpful to identify individuals who can assist with these translations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1122", @@ -24766,8 +25621,8 @@ "repository": 1119, "assignees": [], "labels": [ - 181, - 187 + 187, + 181 ] } }, @@ -24780,6 +25635,7 @@ "node_id": "I_kwDOBXDIEs6AmzbE", "title": "Update Privilege Escalation's Weak SessionID Section", "body": "**What's the issue?**\r\n[Privilege Escalation guide](https://github.com/OWASP/wstg/blob/a6c4017207a10ddccbb4eeb770b0ce8873008060/document/4-Web_Application_Security_Testing/05-Authorization_Testing/03-Testing_for_Privilege_Escalation.md) contains a section at the end called Weak SessionID that feels a bit out of place and could take a rewrite\r\n\r\n**How do we solve it?**\r\nSee if the section is still required, if it can be merged into other sections, or if it simply needs a better rewrite to belong to the rest of the content\r\n\r\nWould you like to be assigned to this issue?\r\nCheck the box if you will submit a PR to fix this issue. Please read CONTRIBUTING.md.\r\n- [ ] Assign me, please!\r\n", + "summary": "The issue highlights that the \"Weak SessionID\" section in the Privilege Escalation guide appears misplaced and may require a rewrite. The proposed solution includes evaluating the necessity of the section, considering merging it with other content, or rewriting it for better coherence with the overall document. A contributor is interested in addressing this issue through a pull request.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1130", @@ -24809,6 +25665,7 @@ "node_id": "I_kwDOBXDIEs6MasqS", "title": "Review and update subdomain takeover content", "body": "**What would you like added?**\r\nThe [subdomain takeover guide](https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/02-Configuration_and_Deployment_Management_Testing/10-Test_for_Subdomain_Takeover) is a bit dated and focuses on manually reviewing for takeovers. Testers validate manually but test automatically due to the wide variety of fingerprints and the error-prone nature of manual validation for takeovers.\r\n\r\nI think the guide should be updated to focus on the typical workflow and the current state of tooling.\r\n\r\nThe typical workflow is along the lines of:\r\n1. Subdomain enumeration\r\n2. Detection via (open-source) tools\r\n3. Validation (since it's a false-positive prone process)\r\n\r\nThere are also quite a few tools, many unmaintained and most miss a lot of instances. [Here's an engineering post analyzing existing tools while developing a new one.](https://www.stratussecurity.com/post/the-ultimate-subdomain-takeover-tool)\r\n\r\nThis would require a relatively major rewrite of the page but I'm happy to help. 
Opinions welcome!\r\n\r\n**Would you like to be assigned to this issue?**\r\n- [x] Assign me, please!\r\n", + "summary": "The subdomain takeover guide is outdated and primarily emphasizes manual review methods, which are error-prone and less efficient. The suggestion is to update the guide to reflect the current testing workflow, which includes subdomain enumeration, detection using open-source tools, and validation to address the high rate of false positives. A comprehensive rewrite is proposed to incorporate these elements and improve the overall utility of the guide. Input from the community is encouraged, and assistance in the update process is offered.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1145", @@ -24840,6 +25697,7 @@ "node_id": "I_kwDOBXDIEs6PZu40", "title": "Need suggestion on issues mapping with wstg: Authenticator strength and Identity control issue", "body": "**What would you like added?**\r\nI'm not sure how these issues can be mapped into the current wstg. I may have missed it or the current wstg does not cover it yet? I don't know. Any suggestion is appreciate.\r\n\r\nBoth of these issues are not about MFA. They're the main authentication factor and identity.\r\n\r\n**1. Is there an issue that covers App using weak Authenticator Types (Improper main authentication factor)?**\r\n\r\nFor example, the app logins using username and birthday. It's very weak and may be able to map into [WSTG-ATHN-04](https://github.com/OWASP/wstg/blob/master/document/4-Web_Application_Security_Testing/04-Authentication_Testing/04-Testing_for_Bypassing_Authentication_Schema.md) or [WSTG-ATHN-07](https://github.com/OWASP/wstg/blob/master/document/4-Web_Application_Security_Testing/04-Authentication_Testing/07-Testing_for_Weak_Password_Policy.md)? [NIST](https://pages.nist.gov/800-63-3/sp800-63b.html) is more coverage about the Permitted Authenticator Types. I currently could not map this issue to the wstg. Only related part I found is in 04-07 that a authentication vulnerable to brute-force is an issue. However, if the app uses Biometric as a main authenticator (which fail to comply NIST) then I could not find any wstg topic to map with.\r\nI'm not even sure if this is something others raise as an issue during a pentest process?\r\n\r\n**2. Application allows changing email without password confirmation in authenticated session.**\r\n\r\nThis is related to [Authenticated Password Changes](https://github.com/OWASP/wstg/blob/master/document/4-Web_Application_Security_Testing/04-Authentication_Testing/09-Testing_for_Weak_Password_Change_or_Reset_Functionalities.md#authenticated-password-changes) that requires a user to re-authenticate before allowing them to change the password. However, when the app allows changing the Email without re-authentication and the email can be used for \"password reset\", the attacker can bypass the need to provide the old password by changing the email and jump to a reset password flow instead. Most applications rely on Email but I think it's actually \"identity\" in the broader term.\r\n\r\nIn my opinion, changing anything related to the user identity (email, name, passport id, etc.) should also require re-authentication. But I can't find anything quite like it in the checklist.\r\n\r\n**Would you like to be assigned to this issue?**\r\nCheck the box if you will submit a PR to add the proposed content. Please read CONTRIBUTING.md.\r\n- [ ] Assign me, please!\r\n\r\nNot sure yet how it will come out. 
Willing to contribute although I'm not the English main.", + "summary": "The issue discusses the need for better mapping of specific authentication and identity control concerns within the current Web Security Testing Guide (WSTG). Two primary areas of focus are highlighted:\n\n1. **Weak Authenticator Types**: The author questions how to address applications that use weak authentication factors, such as a username and birthday, which may not align with NIST guidelines. They seek guidance on relevant WSTG sections that could cover this issue, particularly regarding the use of biometric authenticators that may not comply with standards.\n\n2. **Changing Email Without Password Confirmation**: The second concern is about applications allowing users to change their email addresses without requiring re-authentication. The author argues that this could lead to security vulnerabilities, especially if the email is used for password resets, and suggests that any changes related to user identity should necessitate re-authentication.\n\nTo address these issues, it may be beneficial to review the WSTG documentation for potential gaps and propose new content or revisions that cover these specific concerns more effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/wstg/issues/1151", @@ -24869,6 +25727,7 @@ "node_id": "MDU6SXNzdWU1NzAzMDA2NDU=", "title": "Adding a simple https://web.archive.org/ module", "body": "Hello team I think we should also add a Wayback module so that the user can scan for the crawled data on Wayback and some juicy information could be extracted from it which could help him in performing further pentest.", + "summary": "Implement a Wayback module to enable users to scan for crawled data on the Internet Archive. This feature will allow users to extract valuable information for enhancing penetration testing efforts.\n\nBegin by researching the Wayback Machine API to understand how to retrieve archived data. Next, design the module interface to allow users to input URLs they wish to analyze. Implement the backend logic to fetch and parse data from the Wayback Machine, ensuring proper error handling for cases where data may not be available. Finally, test the module thoroughly for various scenarios to ensure reliability and efficiency in data retrieval.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/206", @@ -24897,6 +25756,7 @@ "node_id": "MDU6SXNzdWU4Mjg3MTM3NTY=", "title": "Handle Exception", "body": "We should use `except exception as e` instead of only `except exception`, otherwise it often difficult to debug the code. Like in issue #398 it shows module not exists because of import error in that module. This often deceiving and sometimes take bit of our time.\r\n_________________\r\n**OS**: `Backbox`\r\n\r\n**OS Version**: `7.1`\r\n\r\n**Python Version**: `3.8.5`\r\n", + "summary": "Refactor exception handling to use `except Exception as e` instead of `except Exception`. This change will improve debugging by providing access to the exception instance, allowing for better error tracing. \n\n### Steps to Address the Issue:\n1. Identify all instances of `except Exception` in the codebase.\n2. Modify each instance to `except Exception as e` to capture the exception details.\n3. Implement logging of exception messages, e.g., `logging.error(f\"An error occurred: {e}\")`, to facilitate debugging.\n4. Test the changes to ensure that exceptions are logged correctly and that no new issues are introduced.\n5. 
Review the related code (e.g., see issue #398) for any specific cases where the lack of exception detail has caused confusion.\n\n### Environment Details:\n- Ensure testing is conducted on Backbox OS version 7.1 with Python 3.8.5 to maintain compatibility.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/401", @@ -24925,6 +25785,7 @@ "node_id": "I_kwDOBU27ls5Hn9M8", "title": "MySQL not working", "body": "Hi I am trying to configure Nettacker with mysql database, but it is not working. here is my config\r\n\r\n```\r\ndef nettacker_database_config():\r\n return {\r\n \"DB\": \"mysql\",\r\n \"DATABASE\": \"nettacker\",\r\n \"USERNAME\": \"owasp\",\r\n \"PASSWORD\": \"mypasswd\",\r\n \"HOST\": \"localhost\",\r\n \"PORT\": \"3306\"\r\n }\r\n```\r\n\r\nAnd here is the error that appear whenever submit new scan\r\n```\r\n\r\nProcess Process-1:1:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.8/multiprocessing/process.py\", line 315, in _bootstrap\r\n self.run()\r\n File \"/usr/lib/python3.8/multiprocessing/process.py\", line 108, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/root/Nettacker/core/scan_targers.py\", line 114, in start_scan_processes\r\n options.targets = expand_targets(options, scan_unique_id)\r\n File \"/root/Nettacker/core/targets.py\", line 97, in expand_targets\r\n multi_processor(\r\n File \"/root/Nettacker/core/scan_targers.py\", line 72, in multi_processor\r\n remove_old_logs(\r\n File \"/root/Nettacker/database/db.py\", line 130, in remove_old_logs\r\n session.query(HostsLog).filter(\r\nAttributeError: 'bool' object has no attribute 'query'\r\n\r\n```\r\nNote: the mysql database is empty there is no table in it", + "summary": "Configure Nettacker to work with MySQL database by ensuring the database is properly initialized and tables are created. \n\n1. **Check MySQL Service**: Ensure that the MySQL service is running on `localhost` and is accessible on port `3306`. You can verify this by using `mysql -u owasp -p` from the command line.\n\n2. **Create the Database and Tables**: Since the indicated MySQL database is empty, initialize it by creating the required database and tables. Execute the following SQL commands:\n ```sql\n CREATE DATABASE nettacker;\n USE nettacker;\n CREATE TABLE HostsLog (\n id INT AUTO_INCREMENT PRIMARY KEY,\n host VARCHAR(255) NOT NULL,\n log TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n );\n ```\n\n3. **Verify Database Configuration**: Ensure that your database configuration in the `nettacker_database_config` function is correct. Confirm that the credentials (`USERNAME`, `PASSWORD`) are accurate and have the necessary permissions to access the `nettacker` database.\n\n4. **Understand the Error Message**: The error message indicates an `AttributeError` related to the `session` object. This suggests that the session may not be set up correctly. Investigate the database connection initialization code to ensure it properly creates a session.\n\n5. **Test the Configuration**: After implementing the above steps, rerun the Nettacker scan to see if the issue persists. If it does, consider enabling more detailed logging to troubleshoot further.\n\nBy following these steps, you should be able to resolve the MySQL configuration issue with Nettacker.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/555", @@ -24955,6 +25816,7 @@ "node_id": "I_kwDOBU27ls5SiVjH", "title": "multi threading issue", "body": "Hi,\r\n\r\n1. There is an issue with the queues. 
When a request is dependent on another request, all queued requests will constantly check for the same variable in the temp events of the database. So we need to generate those requests after the variable is available and stop consuming the CPU.\r\n2. if threads of one subprocess are finished, the subprocess will die. Maybe it's better to let the subprocesses help other subprocesses with their threads. \r\n\r\nThanks, Ali.\r\n", + "summary": "Address the multi-threading issue as follows:\n\n1. Optimize queue handling by implementing a mechanism to prevent all queued requests from continuously checking the same variable in the database's temp events. Instead, create a listener or callback system that triggers the queued requests only when the required variable becomes available. This will reduce CPU consumption significantly.\n\n2. Modify the subprocess management strategy. Instead of allowing a subprocess to terminate when its threads finish, implement a thread-pooling mechanism that allows idle threads in completed subprocesses to assist other running subprocesses. This will enhance resource utilization and improve overall throughput.\n\nBegin by analyzing the current queue management system to identify how requests are being queued and processed. Next, consider implementing an event-driven architecture or utilizing conditions/locks to manage the availability of the required variable efficiently. For subprocess management, evaluate existing thread management practices and design a pooling mechanism that facilitates thread sharing among subprocesses.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/595", @@ -24985,6 +25847,7 @@ "node_id": "I_kwDOBU27ls5SiV4H", "title": "Standardize yaml files", "body": "Hi,\r\n\r\n1. Yaml files don't share good standard practices. We need to clean up a little bit and document them.\r\n2. Yaml files need to support while and for loops.\r\n\r\n\r\nBests, Ali.", + "summary": "Standardize YAML files. \n\n1. Review the current YAML files to identify inconsistencies and areas lacking best practices. Create a set of guidelines for structuring and formatting YAML files, ensuring clarity and uniformity across the repository.\n2. Implement support for `while` and `for` loops in the YAML files. Research existing libraries or tools that can facilitate this functionality and integrate them into the YAML parsing process.\n\nFirst steps:\n- Conduct a thorough audit of all existing YAML files.\n- Draft a style guide outlining standard practices.\n- Explore available libraries for loop support in YAML and evaluate their compatibility with the current system.\n- Create a checklist to ensure all YAML files adhere to the new standards after modifications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/596", @@ -25011,6 +25874,7 @@ "node_id": "I_kwDOBU27ls5SiWEo", "title": "Not enough CI tests", "body": "Hi,\r\n\r\nCurrently, we are facing a lot of issues, we need to have unit tests for\r\n\r\n1. API server & Database\r\n2. Yaml files functionality\r\n3. core functionalities\r\n\r\nBests, Ali.", + "summary": "Add unit tests for the following components to improve CI coverage:\n\n1. **API Server & Database**:\n - Create test cases to validate API endpoints.\n - Mock database interactions to ensure tests run in isolation.\n - Use a testing framework like Jest or Mocha for JavaScript/Node.js, or Pytest for Python.\n\n2. 
**Yaml Files Functionality**:\n - Write tests to verify the correctness of YAML parsing and processing logic.\n - Ensure edge cases (e.g., malformed YAML) are covered.\n\n3. **Core Functionalities**:\n - Identify critical business logic and implement unit tests.\n - Focus on edge cases and potential failure points.\n\n**First Steps**:\n- Review existing codebase to identify untested areas.\n- Set up a testing framework if not already in place.\n- Write and execute initial test cases for the API server and database interactions.\n- Gradually expand coverage to include YAML functionality and core features, ensuring tests are integrated into the CI pipeline for continuous validation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/597", @@ -25037,6 +25901,7 @@ "node_id": "I_kwDOBU27ls5SiWSn", "title": "Refactor restAPI with best practices", "body": "Hi,\r\n\r\nThe API server doesn't follow best practices and needs to be refactored and adjusted with the WebUI.\r\n\r\nBests, Ali.", + "summary": "Refactor the REST API to adhere to best practices. \n\n1. Review the current API endpoints and identify inconsistencies with RESTful principles (e.g., proper use of HTTP methods, resource naming conventions).\n2. Align the API structure and responses with the WebUI requirements to ensure seamless integration.\n3. Implement versioning for the API to maintain backward compatibility.\n4. Improve error handling by standardizing error response formats and status codes.\n5. Optimize authentication and authorization mechanisms, possibly integrating OAuth2 or JWT.\n6. Document the API using OpenAPI Specification for better developer guidance.\n\nStart by analyzing the current API documentation and listing out the existing endpoints. Then, create a roadmap for the refactoring process, focusing on high-impact areas first.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/598", @@ -25067,6 +25932,7 @@ "node_id": "I_kwDOBU27ls5TVsUc", "title": "database lock issue", "body": "Hi,\r\n\r\nin some modules when there are a lots of events submitting to db, i got a lot of failure error with database lock, we need to re-architect the whole thing, it use a lots of IO, unnecessary memory, CPU, etc.\r\n\r\nBests. Ali.", + "summary": "Re-architect the database handling in modules experiencing high event submission rates to address database lock issues. Investigate the root causes of the locking, which may involve analyzing transaction isolation levels and optimizing the database schema.\n\nConsider implementing the following first steps:\n1. Profile the database queries to identify long-running transactions and heavy IO operations.\n2. Review current transaction strategies and explore the possibility of using more granular locking mechanisms or optimistic concurrency control.\n3. Optimize database indices to reduce the time taken for read/write operations.\n4. Implement asynchronous processing for event submissions to minimize blocking.\n5. 
Evaluate the use of connection pooling to efficiently manage database connections and reduce overhead.\n\nFocus on reducing unnecessary memory and CPU consumption while improving overall performance and scalability.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/609", @@ -25093,6 +25959,7 @@ "node_id": "I_kwDOBU27ls5TlHhb", "title": "AttributeError: '_UnixSelectorEventLoop' object has no attribute '_ssock'", "body": "➜ Nettacker git:(issue/609) ✗ python3 nettacker.py -i google.com --profile http -t 1100 -M 5\r\n\r\n\r\n```\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute '_ssock'\r\n self._remove_reader(self._ssock.fileno())\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py\", line 92, in close\r\n self._remove_reader(self._ssock.fileno())\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute '_ssock'\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py\", line 99, in _close_self_pipe\r\n self._remove_reader(self._ssock.fileno())\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute '_ssock'\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py\", line 99, in _close_self_pipe\r\n self._remove_reader(self._ssock.fileno())\r\n self._remove_reader(self._ssock.fileno())\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute '_ssock'\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py\", line 99, in _close_self_pipe\r\n self._close_self_pipe()\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py\", line 99, in _close_self_pipe\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute '_ssock'\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute '_ssock'\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py\", line 99, in _close_self_pipe\r\n self._remove_reader(self._ssock.fileno())\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py\", line 99, in _close_self_pipe\r\n```", + "summary": "Investigate the `AttributeError: '_UnixSelectorEventLoop' object has no attribute '_ssock'` occurring in the Nettacker project when executing `python3 nettacker.py -i google.com --profile http -t 1100 -M 5`. \n\n1. Identify the context of the error:\n - The error arises from the `asyncio` library, particularly in the `selector_events.py` file.\n - The `_ssock` attribute is expected to be present in the event loop but is missing.\n\n2. Examine the version of Python being used:\n - Confirm compatibility of the Python version (3.9 in this case) with the Nettacker codebase.\n\n3. Review the implementation of the `_UnixSelectorEventLoop`:\n - Check if `_ssock` is a custom implementation that may not be initialized properly or if it's a bug in the event loop handling.\n\n4. Check for dependencies:\n - Ensure all dependencies are up-to-date and compatible with the current version of Python.\n\n5. 
Look for existing issues or discussions:\n - Search the GitHub repository and forums for similar issues to see if there are any known fixes or workarounds.\n\nPossible first steps:\n- Modify the event loop initialization in the Nettacker code to ensure `_ssock` is properly defined.\n- Consider switching to a different event loop, such as `asyncio.ProactorEventLoop`, if applicable.\n- Run tests to replicate the error consistently and check for changes in behavior after any modifications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/616", @@ -25115,21 +25982,22 @@ "pk": 884, "fields": { "nest_created_at": "2024-09-11T21:18:54.547Z", - "nest_updated_at": "2024-09-11T21:18:54.547Z", + "nest_updated_at": "2024-09-13T16:26:43.443Z", "node_id": "I_kwDOBU27ls5YTH3r", "title": "creating pyproject.toml file", "body": "Setting up a pyproject.toml file for the project", - "state": "open", - "state_reason": "", + "summary": "", + "state": "closed", + "state_reason": "completed", "url": "https://github.com/OWASP/Nettacker/issues/633", "number": 633, "sequence_id": 1481407979, "is_locked": false, "lock_reason": "", - "comments_count": 4, - "closed_at": null, + "comments_count": 5, + "closed_at": "2024-09-13T03:56:57Z", "created_at": "2022-12-07T09:14:28Z", - "updated_at": "2024-08-04T08:18:38Z", + "updated_at": "2024-09-13T03:56:57Z", "author": 449, "repository": 1122, "assignees": [ @@ -25149,6 +26017,7 @@ "node_id": "I_kwDOBU27ls5vqVJz", "title": "Implement requests rate limiter", "body": "Why there isn't any feature to limit the rate of the requests? \r\nThe lack of this feature make this tool less useful because of the too many requests bans.", + "summary": "Implement a requests rate limiter for the tool to prevent excessive requests that lead to bans. Address the absence of this feature as it significantly impacts usability.\n\n1. Define the desired rate limit (e.g., requests per minute).\n2. Research existing rate limiting algorithms, such as Token Bucket or Leaky Bucket.\n3. Create a middleware or utility function to track and manage request counts.\n4. Integrate the rate limiter into the request-handling workflow.\n5. Implement error handling to manage cases when limits are exceeded.\n6. Test the rate limiter under various scenarios to ensure reliability and effectiveness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/727", @@ -25179,6 +26048,7 @@ "node_id": "I_kwDOBU27ls5xyg4U", "title": "Enhancement: Provide More Command Examples for New Users", "body": "In your Python code (nettacker.py), consider adding more command examples. This helps new users who may find it difficult to create the right commands when they use your code for the first time. It makes it easier for them to write the correct code.\r\n\r\n**Like that:-** \r\n\r\nnettacker -L en -v -o results.txt -i target.com -m ftp_brute,ssh_brute -u admin -P passwords.txt -g 21,22 --start-api --api-host 0.0.0.0 --api-port 5000\r\n", + "summary": "Add more command examples to nettacker.py for new users. Include a variety of scenarios to demonstrate different functionalities. Ensure examples cover common use cases, such as specifying input files, setting modes, and configuring API options.\n\n**First Steps:**\n1. Review the current documentation and identify sections lacking examples.\n2. Create a list of common commands and their parameters.\n3. Write clear, concise command examples that users can easily replicate.\n4. 
Incorporate comments in the code explaining each part of the command.\n5. Test each example to ensure accuracy and functionality.\n6. Update the README or documentation file with the new examples for easy access.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/740", @@ -25205,6 +26075,7 @@ "node_id": "I_kwDOBU27ls5xz-wp", "title": "Avoid warning message when running app. ", "body": "Hello, I'm testing the application and I want to run the API in a production environment. \r\n\r\n```\r\npython nettacker.py --start-api --api-host 0.0.0.0 --api-access-key HERECOMESTHESECRETKEY\r\n\r\n```\r\n\r\nhow can i run this command with gunicorn in production? ", + "summary": "Avoid warning messages when running the application in production. Use Gunicorn to serve the API effectively. \n\n1. Install Gunicorn if not already done:\n ```bash\n pip install gunicorn\n ```\n\n2. Run the application with Gunicorn using the following command:\n ```bash\n gunicorn --bind 0.0.0.0:8000 nettacker:app --access-logfile - --error-logfile -\n ```\n Replace `nettacker:app` with the appropriate module and application name.\n\n3. Ensure that your application is set up to handle production configurations, specifically regarding logging and error handling.\n\n4. Consider using environment variables for sensitive information, such as `API_ACCESS_KEY`, to avoid hardcoding them in your command.\n\n5. Test the application in a staging environment before deploying to production to confirm that no warnings appear and the application behaves as expected. \n\nFollow these steps to avoid warnings and ensure a smooth production deployment.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/741", @@ -25233,6 +26104,7 @@ "node_id": "I_kwDOBU27ls52YK1E", "title": "When the program is doing a scan, there is a malfunction in the APIs", "body": "Please describe the issue or question and share your OS and Python version.\r\n_________________\r\n**OS**: `x`\r\nUbuntu\r\n**OS Version**: `x`\r\n22.04.2 LTS\r\n**Python Version**: `x`\r\nPython 3.10.12\r\n\r\nHello, i made a UI in react to call the apis.\r\nWhen nettacker it's not scanning all the api works perfectly but when a scan is in progress the api responds almost every time \"database error!\" but every once in a while the api works. ", + "summary": "Investigate malfunctioning APIs during program scans. Confirm the operating system and Python version as Ubuntu 22.04.2 LTS and Python 3.10.12, respectively. \n\n1. Reproduce the issue by running a scan with nettacker and monitor API responses.\n2. Check logs for any database connection errors or timeouts when the scan is in progress.\n3. Analyze the API request handling during scanning to identify potential bottlenecks.\n4. Review database performance and optimize queries if necessary.\n5. Test with a lower scan intensity to see if the issue persists.\n6. Consider setting up a more robust error handling mechanism within the API to capture and log detailed error messages.\n\nImplement these steps to diagnose and address the database error issue during scans.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/762", @@ -25263,6 +26135,7 @@ "node_id": "I_kwDOBU27ls523eE7", "title": "Data missing on /logs/get_json?target=", "body": "Please describe the issue or question and share your OS and Python version.\r\n_________________\r\nOS: x\r\nUbuntu\r\nOS Version: x\r\n22.04.2 LTS\r\nPython Version: x\r\nPython 3.10.12\r\n\r\nHi! 
in my program that i made to use nettacker i would like to replicate a function of the real UI , the function where you go on crawler and click on a host and inside there is a html table with all the events of that host. i currently did it with /logs/get_json?target=192.168.254.12, in response to that i get this json:\r\n\r\n`[{\"scan_unique_id\": \"xlcbpjisyjvzbcgidjzjgqxuiztvtqkg\", \"target\": \"192.168.254.12\", \"port\": \"22\", \"event\": \"host: 192.168.254.12 method: tcp_connect_send_and_receive ports: '22' timeout: 3.0 conditions: ssh: - -OpenSSH_\", \"json_event\": {\"timeout\": 3.0, \"host\": \"192.168.254.12\", \"ports\": \"22\", \"method\": \"tcp_connect_send_and_receive\", \"response\": {\"ssl_flag\": false, \"conditions_results\": {\"ssh\": [\"-OpenSSH_\"]}}}}, {\"scan_unique_id\": \"xlcbpjisyjvzbcgidjzjgqxuiztvtqkg\", \"target\": \"192.168.254.12\", \"port\": \"3389\", \"event\": \"host: 192.168.254.12 method: tcp_connect_send_and_receive ports: '3389' timeout: 3.0 conditions: open_port: - ''\", \"json_event\": {\"timeout\": 3.0, \"host\": \"192.168.254.12\", \"ports\": \"3389\", \"method\": \"tcp_connect_send_and_receive\", \"response\": {\"ssl_flag\": false, \"conditions_results\": {\"open_port\": [\"\"]}}}}, {\"scan_unique_id\": \"xlcbpjisyjvzbcgidjzjgqxuiztvtqkg\", \"target\": \"192.168.254.12\", \"port\": \"3306\", \"event\": \"host: 192.168.254.12 method: tcp_connect_send_and_receive ports: '3306' timeout: 3.0 conditions: mysql: - is not allowed to connect to this MySQL server\", \"json_event\": {\"timeout\": 3.0, \"host\": \"192.168.254.12\", \"ports\": \"3306\", \"method\": \"tcp_connect_send_and_receive\", \"response\": {\"ssl_flag\": false, \"conditions_results\": {\"mysql\": [\"is not allowed to connect to this MySQL server\"]}}}}, {\"scan_unique_id\": \"xlcbpjisyjvzbcgidjzjgqxuiztvtqkg\", \"target\": \"192.168.254.12\", \"port\": \"8081\", \"event\": \"host: 192.168.254.12 method: tcp_connect_send_and_receive ports: '8081' timeout: 3.0 conditions: http: - HTTP/1.1 400\", \"json_event\": {\"timeout\": 3.0, \"host\": \"192.168.254.12\", \"ports\": \"8081\", \"method\": \"tcp_connect_send_and_receive\", \"response\": {\"ssl_flag\": true, \"conditions_results\": {\"http\": [\"HTTP/1.1 400\"]}}}}, {\"scan_unique_id\": \"xlcbpjisyjvzbcgidjzjgqxuiztvtqkg\", \"target\": \"192.168.254.12\", \"port\": \"\", \"event\": \"host: 192.168.254.12 method: socket_icmp timeout: 3.0 conditions: host: 192.168.254.12 response_time: 0.0008175373077392578 ssl_flag: false\", \"json_event\": {\"timeout\": 3.0, \"host\": \"192.168.254.12\", \"method\": \"socket_icmp\", \"response\": {\"ssl_flag\": false, \"conditions_results\": {\"host\": \"192.168.254.12\", \"response_time\": 0.0008175373077392578, \"ssl_flag\": false}}}}, {\"scan_unique_id\": \"xlcbpjisyjvzbcgidjzjgqxuiztvtqkg\", \"target\": \"192.168.254.12\", \"port\": \"8081\", \"event\": \"allow_redirects: false headers: User-Agent: Nettacker 0.3.2 TRENT method: get ssl: false timeout: 3.0 url: https://192.168.254.12:8081 conditions: log: '404' status_code: - '404'\", \"json_event\": {\"timeout\": 3.0, \"headers\": {\"User-Agent\": \"Nettacker 0.3.2 TRENT\"}, \"allow_redirects\": false, \"ssl\": false, \"url\": \"https://192.168.254.12:8081\", \"method\": \"get\", \"response\": {\"conditions_results\": {\"status_code\": [\"404\"], \"log\": \"404\"}}}}]`\r\n\r\nthe problem here is that the module name is missing, if we go on the nettacker UI and press on a host we see a table with the same data that i have in the json but there is 
also the module name, which i can't figure out where is located.\r\n\r\n![tablebett](https://github.com/OWASP/Nettacker/assets/118749390/13dc0e37-e987-4593-9928-6ff10e18839b)\r\n\r\nWhere i can also find the module name?\r\n\r\nWhen i list all the hosts they all have a module_name array with all the modules but then when i get the json i cannot determine which scan module a event has used, thank you very much!", + "summary": "Investigate missing module names in JSON response from `/logs/get_json?target=` endpoint. Ensure compatibility with Ubuntu 22.04.2 LTS and Python 3.10.12.\n\n1. Examine the JSON structure returned by the endpoint. Identify the `scan_unique_id` and correlate it with the module names listed in the host data.\n2. Check Nettacker’s codebase, focusing on the section that handles event logging. Look for how module names are assigned to events during scans.\n3. Search for relevant documentation or comments within the code that could clarify the relationship between scans, events, and module names.\n4. If module names are stored in a different location, determine how to retrieve them using the `scan_unique_id`.\n5. Consider adding debug statements to the relevant code sections to capture and log module names during event creation.\n\nNext steps:\n- Review the Nettacker source code to locate the event logging mechanism.\n- Analyze any API documentation or comments in the code for insights on module name assignment.\n- Test retrieving module names using different endpoints or parameters to see if they can be linked to events.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/766", @@ -25289,6 +26162,7 @@ "node_id": "I_kwDOBU27ls56ivKL", "title": "Reorganize Profile Section in Web Module for Improved Usability", "body": "## Issue Summary:\r\nThe current profile section in the web module appears cluttered with multiple protocols and products, making it challenging for users to navigate and select profiles efficiently. This issue proposes reorganizing the profile section to enhance user experience.\r\n\r\n## Proposed Changes:\r\n\r\n- **Separate Section for Profiles:**\r\n Introduce a separate section for profiles to declutter the existing profile selection interface.\r\n\r\n- **Improved Categorization:**\r\n Group profiles based on protocols and products to make it more intuitive for users.\r\n\r\n#### Screenshot:\r\n![Screenshot_20231226_010406](https://github.com/OWASP/Nettacker/assets/50882504/13d1b55e-e8bf-49ee-91d0-2a6cf0cf9cc7)\r\n\r\n## Implementation Plan:\r\n\r\n- Identify the relevant HTML templates and JavaScript files responsible for rendering the web interface.\r\n- Introduce modifications to separate the profile section and improve categorization.\r\n\r\n## Benefits:\r\n\r\n- Enhances user experience by providing a more organized and user-friendly profile selection interface.\r\n- Simplifies the process of selecting profiles, especially when dealing with multiple protocols and products.\r\n\r\n", + "summary": "Reorganize the Profile Section in the Web Module for Improved Usability\n\n1. **Create a Separate Section for Profiles:**\n - Implement a distinct area within the web module dedicated solely to profiles, reducing clutter in the current selection interface.\n\n2. **Enhance Categorization:**\n - Group profiles logically based on their associated protocols and products, making navigation more intuitive for the user.\n\n3. 
**Review Current Implementation:**\n - Identify and analyze the existing HTML templates and JavaScript files that render the profile selection interface to understand the current structure.\n\n4. **Modify User Interface:**\n - Execute the necessary changes to introduce the new profile section and improved categorization.\n\n5. **Test the Changes:**\n - Conduct usability testing to ensure the new layout meets user needs and streamlines the profile selection process.\n\n6. **Iterate Based on Feedback:**\n - Gather user feedback post-implementation and make adjustments as needed to further enhance usability.\n\nBy following these steps, improve the user experience significantly by creating a more organized and user-friendly profile selection interface, thus simplifying the profile selection process across multiple protocols and products.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/781", @@ -25319,6 +26193,7 @@ "node_id": "I_kwDOBU27ls6BDNAL", "title": "Same 3 Ports and Never ANY Results", "body": "Maybe it's because I'm new to bug bounty hunting/pentesting, but no matter what I do, I seem to get the same ports scanned with no results to show in the report. I have tried the following modules to no success (my screenshot here shows 1 example of its failure):\r\n\r\nsubdomain_scan\r\nsubdomain_takeover_vuln\r\nadmin_scan\r\nwordpress_version_scan\r\n\r\nWhat am I missing or doing wrong? How do I use this tool correctly, or is it currently just broken? \r\n\r\nBecause after completing the scan shown in the photograph, the report showed nothing of value (to my knowledge anyway) and almost no information altogether.\r\n\r\n{\"timeout\": 3.0, \"host\": \"arkoselabs.com\", \"ports\": \"443\", \"method\": \"tcp_connect_send_and_receive\", \"response\": {\"ssl_flag\": false, \"conditions_results\": {\"http\": [\"HTTP/1.1 400\", \"Content-Length: 915\", \"Content-Type: \", \"Server: \"]}}}\r\n\r\nThat's as much as I figured out from the wordpress_version_scan report ^\r\n_________________\r\n\r\n**OS**: `Kali Linux`\r\n\r\n**OS Version**: `kali-linux-2023.4`\r\n\r\n**Python Version**: `3.11.8`\r\n\r\n![nettacker_fail](https://github.com/OWASP/Nettacker/assets/46658915/517c8fc5-bcc3-4940-81b3-31f7f2ccfcb4)\r\n\r\n", + "summary": "Investigate the issue of scanning only three ports with no results. \n\n1. Confirm the setup: Ensure that the environment is correctly configured with Kali Linux 2023.4, Python 3.11.8, and that all required dependencies for the scanning tools are installed.\n\n2. Review the modules used:\n - Investigate the `subdomain_scan`, `subdomain_takeover_vuln`, `admin_scan`, and `wordpress_version_scan` modules for their parameters and expected outputs. Ensure they are compatible with the target.\n\n3. Verify target accessibility: Check if the target host (arkoselabs.com) is accessible and that port 443 is open. Use tools like `nmap` to confirm the open ports on the target.\n\n4. Analyze the error response: Note that the scan returned an HTTP 400 error. Investigate the conditions that lead to this response, such as incorrect request formats or missing headers.\n\n5. Increase scan scope: Try scanning additional ports beyond 443 to gather more information. Modify the configuration to include a wider range of ports.\n\n6. Enable verbose logging: Use verbose or debug flags in the scanning tool to get more detailed output, which can help identify where the process is failing.\n\n7. 
Consult documentation: Review the official documentation for the scanning tools to ensure they are being used correctly. Look for example configurations or common pitfalls.\n\n8. Engage the community: If the problem persists, seek help from forums or GitHub discussions related to the tools in use. Provide detailed information about the scans performed and the responses received.\n\n9. Test with alternative tools: As a final step, consider testing other security scanning tools to see if they yield better results on the same target. \n\nBy following these steps, troubleshoot the scanning issue and increase the likelihood of obtaining meaningful results.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/817", @@ -25334,8 +26209,8 @@ "repository": 1122, "assignees": [], "labels": [ - 191, - 193 + 193, + 191 ] } }, @@ -25348,6 +26223,7 @@ "node_id": "I_kwDOBU27ls6F93pw", "title": "Vulnerbilities in profile not found", "body": "when i start --profile vulnerabilities the nettacker give me not found error", + "summary": "Address the \"profile not found\" error when using the `--profile vulnerabilities` option in Nettacker. \n\n1. Verify that the profile \"vulnerabilities\" exists in the Nettacker profiles directory. Check the directory structure and confirm that the profile file is present.\n2. If the profile is missing, create a new profile or download the existing one from the Nettacker repository.\n3. Ensure that the profile is correctly formatted and adheres to the expected structure. Validate the syntax and required fields within the profile.\n4. Check for any typos in the command line input or profile name. Confirm that the command is correctly specified as `nettacker --profile vulnerabilities`.\n5. Review the Nettacker documentation for any updates or changes related to profile usage.\n\nImplement these steps to resolve the issue and enable the successful utilization of the \"vulnerabilities\" profile.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/830", @@ -25365,8 +26241,8 @@ 450 ], "labels": [ - 191, - 194 + 194, + 191 ] } }, @@ -25379,6 +26255,7 @@ "node_id": "I_kwDOBU27ls6H2-sp", "title": "i keep gitting the smae error it making me pissed", "body": "when i runned python nettacker.py -i owasp.org -s -m port_scan it gave me a error saying [2024-05-05 01:30:55][X] file \"/home/kali/Nettacker/.data/results/results_2024_05_05_01_30_53_ggruaklqqz.html\" is not writable! ", + "summary": "Troubleshoot the \"file not writable\" error when executing `python nettacker.py -i owasp.org -s -m port_scan`. \n\n1. Verify that the directory `/home/kali/Nettacker/.data/results/` exists. If it does not exist, create it using:\n ```bash\n mkdir -p /home/kali/Nettacker/.data/results/\n ```\n\n2. Check the file permissions of the results directory. Ensure you have write permissions by running:\n ```bash\n ls -ld /home/kali/Nettacker/.data/results/\n ```\n\n3. If permissions are insufficient, modify them using:\n ```bash\n chmod +w /home/kali/Nettacker/.data/results/\n ```\n\n4. If the directory exists and permissions are correct, check if the disk is full or if there are any filesystem issues that might prevent writing.\n\n5. Consider running the command with elevated privileges if necessary:\n ```bash\n sudo python nettacker.py -i owasp.org -s -m port_scan\n ```\n\n6. Review the script for any hardcoded paths or configurations that might affect file writing behavior.\n\n7. 
Ensure that the Nettacker application is not running concurrently, which might lock the file.\n\nBy following these steps, you should be able to resolve the issue and successfully run the port scanning command.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/837", @@ -25396,8 +26273,8 @@ 450 ], "labels": [ - 191, - 194 + 194, + 191 ] } }, @@ -25406,10 +26283,11 @@ "pk": 894, "fields": { "nest_created_at": "2024-09-11T21:19:09.852Z", - "nest_updated_at": "2024-09-12T02:31:54.272Z", + "nest_updated_at": "2024-09-13T04:23:10.612Z", "node_id": "I_kwDOBU27ls6VkNQS", "title": "[Question] Do we have plan to support Terraform", "body": "Do we have plan to support Terraform ?\r\n\r\nmake terraform provider plugin of this tool\r\n\r\n", + "summary": "Evaluate the feasibility of implementing Terraform support. \n\n1. Research existing Terraform provider plugins to understand their architecture and functionality.\n2. Define the scope of the Terraform provider you need to create. Identify which resources and data sources should be supported.\n3. Review the Terraform Plugin SDK documentation to familiarize yourself with the development process.\n4. Set up a development environment for creating the Terraform provider plugin.\n5. Draft a plan outlining the API interactions and resource management required for your tool.\n6. Begin the implementation by creating the provider schema and the necessary CRUD (Create, Read, Update, Delete) functions.\n7. Write unit tests to ensure the provider functions correctly and adheres to Terraform's standards.\n8. Document the usage of the provider for end-users.\n\nClarify the timeline and resources needed for development of the Terraform provider plugin.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Nettacker/issues/893", @@ -25425,8 +26303,8 @@ "repository": 1122, "assignees": [], "labels": [ - 191, - 193 + 193, + 191 ] } }, @@ -25439,6 +26317,7 @@ "node_id": "MDU6SXNzdWUyMjI0Mzg5OTg=", "title": "Traditional Chinese Version", "body": "Hi, this book is useful for gopher to follow the best practice of secure programming. Would you mind I translate this book to Traditional Chinese? I will link to origin repository and follows the license.\r\n\r\nthanks", + "summary": "A request has been made to translate a book on secure programming into Traditional Chinese. The requester is willing to link to the original repository and comply with the licensing terms. It would be helpful to confirm whether this translation can proceed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/16", @@ -25469,6 +26348,7 @@ "node_id": "MDU6SXNzdWUyMjMyNTUyOTI=", "title": "Can I translate this book into Chinese ?", "body": "I want to translate this book into Chinese, It's a pretty good book. \r\n", + "summary": "A user is seeking permission to translate a book into Chinese, expressing appreciation for its content. 
To address this request, the appropriate permissions or guidelines for translation need to be clarified.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/19", @@ -25497,6 +26377,7 @@ "node_id": "MDU6SXNzdWUyMjUyNzcyNzE=", "title": "Addition of Glossary for certain terms", "body": "Hi all,\r\n\r\nI'd like to propose the addition of a glossary for certain terms that may be misinterpreted or need additional explanations to be unambiguously interpreted by readers.\r\nThis would also require a review of the complete document to ensure the uses of such words are correct according to their glossary definition.\r\nThis is particularly important in some specific cases like crypto, where terms are sometimes misused and there can be confusion as to their meaning and the instances of their use across the document could be linked to the definition in the glossary for fast reference.\r\n\r\nHere's an example of what glossary entries could look like:\r\n\r\n_encoding_ - function which transforms input into a different representation without the use of a key. These are reversible and do not provide real security because if the algorithm is known, their output can be reversed.\r\n\r\n_hash_ - may be a reference to a hashing function or its output. \r\n\r\n_hashing/hash function_ - cryptographically-secure function which transforms input into fixed-length output, also known as trapdoor or one-way function. Their output cannot be \"reversed\", in the sense of retrieving back the input information from the output, because its fixed-length property does not retain the original information, but it can be \"guessed\" by attempting all possible inputs until we get the same output.\r\n\r\n_encryption_ - process by which input data (plaintext) is transformed into encrypted data (ciphertext) via the application of a secret key. The output of an encryption process may be reversed (decrypted) using a key.\r\n\r\n_KDF_ (or _Key Derivation Function_) - In this document, when referring to a KDF, we mean a function which takes a user password as input and derives a key which can be used for storage and authentication or a high entropy key for use with symmetric encryption algorithms. This is **not** a hash function, but a construction around a hash function to make it more resistant to attacks such as brute-force, rainbow-tables, etc. A simple example to understand what is meant by construction is [PBKDF2](https://github.com/golang/crypto/blob/master/pbkdf2/pbkdf2.go) which uses salting and iterations to increase the cost of breaking the hash to the attacker.\r\n\r\nThe crypto section would then be revised to use these terms appropriately.", + "summary": "The proposal suggests creating a glossary for specific terms that may lead to misinterpretation, particularly in the context of cryptography. This glossary would clarify definitions and ensure consistency throughout the document. It is emphasized that a thorough review of the document is necessary to align term usage with the glossary definitions. The proposal includes examples of potential glossary entries, such as definitions for encoding, hashing, encryption, and key derivation functions, highlighting their distinct characteristics and purposes. 
To proceed, the document should be reviewed and updated to incorporate these definitions accurately.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/32", @@ -25525,6 +26406,7 @@ "node_id": "MDU6SXNzdWU3MDU2MDIyNzk=", "title": "Needs compliant & non-compliant code samples", "body": "We should follow the CERT C/C++/Java style and show non-compliant and compliant code for each example, esp in the Validation section of the document. For example, [this is the CERT C coding rule for integer wraps](https://wiki.sei.cmu.edu/confluence/display/c/INT30-C.+Ensure+that+unsigned+integer+operations+do+not+wrap).\r\n\r\nI'm willing to help provide these, many can be pulled from other locations", + "summary": "The issue highlights the need for both compliant and non-compliant code samples that adhere to the CERT C/C++/Java coding standards, particularly in the Validation section of the documentation. The suggestion includes using specific examples, such as the CERT rule regarding integer wraps. Assistance is offered to contribute these samples, indicating that many can be sourced from existing materials. \n\nTo address this, it would be beneficial to compile relevant code examples that illustrate both compliant and non-compliant practices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/71", @@ -25553,6 +26435,7 @@ "node_id": "MDU6SXNzdWU3MDU2MDU3ODQ=", "title": "Validation of integers needs a review", "body": "I'm just picking on integers right now, because that's top of mind, but:\r\n\r\n- `strconv.Atoi` is almost never correct; [I cover this in a few different talks](https://github.com/lojikil/kyoto-go-nihilism)\r\n- We list `strconv.ParseInt` but not `ParseUint` (thanks @disconnect3d for pointing that out)\r\n- We need to explain that many things take flows that are `int` but can pun those flows to `int32` or `int64` or `uint` flavors without the compiler complaining, but can lead to various issues. I spoke about this vis-a-vis Kubernetes [in my talk at OWASP Global AppSec DC](https://github.com/lojikil/kubectlfish).", + "summary": "The issue highlights the need to review the handling of integer validation in the codebase. It points out that using `strconv.Atoi` is frequently incorrect and suggests that `strconv.ParseUint` should be included alongside `strconv.ParseInt`. Additionally, there is a concern about the implicit conversions between different integer types, which can lead to problems despite not raising compiler errors. To address these concerns, a review of the integer validation methods and their documentation is necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/72", @@ -25579,6 +26462,7 @@ "node_id": "MDU6SXNzdWU3MDU2MTIzMzg=", "title": "XSS Section is good, but clarify `text/template` ", "body": "We mention that `text/template` won't save you from XSS, but the [documentation explicitly states that it is unsafe for handling user input](https://golang.org/pkg/text/template/#pkg-overview). We should clarify that the threat model for `text/template` does not handle user input, and that `html/template` is only safe iff passed user data as parameters (e.g. we need to avoid Template Injection) ", + "summary": "The issue highlights the need for clarification regarding the use of `text/template` in relation to XSS vulnerabilities. 
While it is noted that `text/template` is not safe for handling user input, the documentation needs to explicitly reflect that the threat model does not account for user input. Additionally, it emphasizes that `html/template` is safe only when user data is passed as parameters, cautioning against Template Injection. It suggests revising the documentation to clearly communicate these points.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/73", @@ -25605,6 +26489,7 @@ "node_id": "MDU6SXNzdWU3MDU2MTM0MTI=", "title": "SQLi and templates", "body": "Most of the non-compliant Go SQL code I see is actually abuse of templates, rather than string joins. We should show non-compliance via templating as well, so that developers do not think that templating can necessarily save them here.", + "summary": "The issue raises concerns about the misuse of templates in Go SQL code, highlighting that many instances of non-compliance stem from this misuse rather than from string joins. It suggests the need to demonstrate non-compliance in templating to ensure developers understand that using templates does not inherently prevent SQL injection vulnerabilities. A possible action could be to implement checks or guidelines addressing this issue in the codebase.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/74", @@ -25633,6 +26518,7 @@ "node_id": "MDU6SXNzdWU3MDU2MjU2NjM=", "title": "Incorrect Cryptography recommendations", "body": "In the [Cryptography Practices](https://github.com/OWASP/Go-SCP/tree/master/src/cryptographic-practices) section we say the following:\r\n\r\n> MD5 is the most popular hashing algorithm, but securitywise BLAKE2 is considered the strongest and most flexible.\r\n\r\nand \r\n\r\n> To fight back, the hashing function should be inherently slow, using at least 10,000 iterations.\r\n\r\nMD5 is deprecated and insecure; we should never list it as a compliant sample. Additionally, iterated hashing is incorrect, and. we should recommend key stretching or some other mechanism for handling these scenarios. Lastly, there are no mentions of HMACs, which are the correct solution to authentication issues. \r\n\r\nI can help massage these sections with some input from our cryptographers.\r\n\r\nAdditionally, we say:\r\n\r\n> Please **not** that slowness is something desired on a cryptographic hashing algorithm.\r\n\r\nWhich should be \"Please note...\"\r\n", + "summary": "The issue highlights incorrect cryptography recommendations in the Cryptography Practices section. It points out that MD5 should not be listed as a compliant sample due to its insecure status, and iterated hashing is not the correct approach; instead, key stretching should be recommended. The lack of mention of HMACs for authentication is also noted as a significant oversight. Additionally, a typographical error in the text is identified. 
\n\nTo address these issues, a revision of the relevant sections with input from cryptography experts is suggested.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/75", @@ -25659,6 +26545,7 @@ "node_id": "MDU6SXNzdWU5Mjc4OTQ1MDQ=", "title": "Wrong reference links for hashing algorithms in validation-and-storage.md", "body": "In \" Go-SCP/src/authentication-password-management/validation-and-storage.md \" [LINK](https://github.com/OWASP/Go-SCP/blob/52c322fc3949002c380c7956464d0e5f091ddb93/src/authentication-password-management/validation-and-storage.md#storing-password-securely-the-practice) the reference links for hashing functions are wrong.\r\n\r\nIt should be something similar to:\r\n```md\r\nIn the case of password storage, the hashing algorithms recommended by\r\n[OWASP][2] are [`bcrypt`][3], [`PDKDF2`][4], [`Argon2`][5] and [`scrypt`][6].\r\n\r\n[1]: ../cryptographic-practices/pseudo-random-generators.md\r\n[2]: https://www.owasp.org/index.php/Password_Storage_Cheat_Sheet\r\n[3]: https://godoc.org/golang.org/x/crypto/bcrypt\r\n[4]: https://godoc.org/golang.org/x/crypto/pbkdf2\r\n[5]: https://github.com/p-h-c/phc-winner-argon2\r\n[6]: https://pkg.go.dev/golang.org/x/crypto/scrypt\r\n[7]: https://github.com/ermites-io/passwd\r\n```", + "summary": "The issue highlights incorrect reference links for hashing algorithms in the \"validation-and-storage.md\" file related to password management. The suggested correction includes updating the references to point to the appropriate OWASP resources and hashing algorithm documentation. To resolve this, the links should be adjusted to ensure they accurately direct users to the intended references for `bcrypt`, `PDKDF2`, `Argon2`, and `scrypt`.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/81", @@ -25689,6 +26576,7 @@ "node_id": "MDU6SXNzdWU5NDAwMjY4NTU=", "title": "Validation section fix", "body": "From the Validation section, is this part backwards? \r\n\r\n`Anytime data is passed from a trusted source to a less-trusted source,...`", + "summary": "The issue questions the clarity of a statement in the Validation section, suggesting that it may be phrased incorrectly. The text in question implies a potential misunderstanding regarding the relationship between trusted and less-trusted sources. It may be necessary to rephrase this section for better clarity and accuracy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/82", @@ -25715,6 +26603,7 @@ "node_id": "MDU6SXNzdWU5NDA4NDIxNDE=", "title": "Source for this?", "body": "\"sequential authentication implementations (like Google does nowadays)\" in https://github.com/OWASP/Go-SCP/blob/master/src/authentication-password-management/validation-and-storage.md", + "summary": "The issue raises a question about the source of the statement regarding \"sequential authentication implementations\" as mentioned in a specific document related to authentication and password management. To address this, it would be helpful to provide references or documentation that support this claim, particularly examples of how Google implements such sequential authentication methods.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/83", @@ -25741,6 +26630,7 @@ "node_id": "MDU6SXNzdWU5NDA4NTIyNDE=", "title": "SHA256 for pass hashing?", "body": "Should we remove the 1st example here in case someone doesn't read the rest of the page? 
https://github.com/OWASP/Go-SCP/blob/master/src/authentication-password-management/validation-and-storage.md", + "summary": "The issue raises a concern about the use of SHA256 for password hashing in a specific example on a documentation page. It suggests that the first example might be misleading for users who do not read the entire content and could lead to improper password management practices. The implied action is to consider removing or modifying the first example to prevent potential misuse.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/84", @@ -25767,6 +26657,7 @@ "node_id": "I_kwDOBTCWsc5P-0Od", "title": "Section \"Sanitization\" should be under \"Output Encoding\", not \"Input Validation\"", "body": "The section \"Sanitization\" talks about what needs to be done to safely display user submitted content, which doesn't actually have anything to do with \"Input Validation\", despite being a part of that chapter.\r\n\r\nHaving this section in the wrong place can mislead developers and give them a false sense of security (\"I don't need to worry about XSS, because I've removed the HTML stuff\").\r\n\r\nI suggest moving the \"Sanitization\" section to the \"Output Encoding\" chapter, probably renaming it to something like \"HTML\".", + "summary": "The \"Sanitization\" section is currently placed under \"Input Validation,\" which is misleading as it primarily addresses how to safely display user-submitted content. This misplacement may lead developers to underestimate their need to address XSS vulnerabilities. It is suggested to move the \"Sanitization\" section to the \"Output Encoding\" chapter and consider renaming it to something like \"HTML\" for better clarity. Adjusting this organization could enhance understanding and security practices among developers.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/90", @@ -25793,6 +26684,7 @@ "node_id": "I_kwDOBTCWsc5gTRIm", "title": "gorilla is archived/no longer maintained", "body": "Page 9 recommends gorilla as a 3rd party package, but the gorilla maintainers have archived the project.", + "summary": "The issue highlights that the gorilla package is archived and no longer maintained, yet it is still recommended on page 9 as a third-party package. It suggests that the recommendation should be updated to reflect the current status of gorilla, potentially by removing it or replacing it with a maintained alternative.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/95", @@ -25821,6 +26713,7 @@ "node_id": "I_kwDOBTCWsc5hxnm4", "title": "Add automation", "body": "It would be good to add some automation to this project, nothing fancy - probably a link checker to start with. This could be run on pull-request or merge to main, and for example could check for broken links\r\n\r\nWe do something similar in [Secure Coding Practices](https://github.com/OWASP/secure-coding-practices-quick-reference-guide/blob/main/.github/workflows/housekeeping.yaml)", + "summary": "The issue suggests implementing automation in the project, specifically a link checker to identify broken links. This automation could be triggered during pull requests or merges to the main branch. A reference to a similar implementation in another project is provided as an example. 
To address this, consider creating a workflow that incorporates a link-checking tool.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Go-SCP/issues/96", @@ -25849,6 +26742,7 @@ "node_id": "MDU6SXNzdWUyMTgzMzg1MTU=", "title": "Q2 2017 expenses", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/VirtualVillage/issues/1", @@ -25877,6 +26771,7 @@ "node_id": "MDU6SXNzdWUyMTgzMzkyNTQ=", "title": "VMware Sponsorship Status", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/VirtualVillage/issues/2", @@ -25905,6 +26800,7 @@ "node_id": "MDU6SXNzdWUyMTgzMzk1NTg=", "title": "Update Project page", "body": "Current Project Page -- https://www.owasp.org/index.php/OWASP_Virtual_Village_Project#tab=Main\r\n\r\nMigrate away from project page hosted on owasp and move entirely to github", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/VirtualVillage/issues/3", @@ -25933,6 +26829,7 @@ "node_id": "MDU6SXNzdWUyMTgzMzk5MTE=", "title": "Documentation", "body": "Produce ongoing Documentation with regards to Architecture , Access as well as projects hosted inside of VirtualVillage.\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/VirtualVillage/issues/4", @@ -25961,6 +26858,7 @@ "node_id": "MDU6SXNzdWUyMTgzNDAyMDI=", "title": "Create a Request form ", "body": "Create request form for virtual village Resources , Funding as well as Volunteers.\r\n\r\nhttps://goo.gl/forms/muaaixyTGWgJSLTk1", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/VirtualVillage/issues/5", @@ -25985,10 +26883,11 @@ "pk": 915, "fields": { "nest_created_at": "2024-09-11T21:19:55.839Z", - "nest_updated_at": "2024-09-11T21:19:55.839Z", + "nest_updated_at": "2024-09-13T16:14:32.877Z", "node_id": "MDU6SXNzdWUyMTgzNDA1NzE=", "title": "deprecate Trello for managing Roadmap", "body": "https://trello.com/b/kyx0xHcu/owasp-virtual-village <-- Deprecated ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/VirtualVillage/issues/6", @@ -26017,6 +26916,7 @@ "node_id": "MDU6SXNzdWUzMjMwNDE5NDc=", "title": "Add content for secure compiler settings for Android NDK ", "body": "In chapter: \"Testing Code Quality and Build Settings of Android Apps\" \r\n\r\nEnhance section \"[Make Sure That Free Security Features Are Activated (MSTG-CODE-9)](https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05i-Testing-Code-Quality-and-Build-Settings.md#make-sure-that-free-security-features-are-activated-mstg-code-9)\" with \"Secure Compiler Settings for Android NDK\".\r\n\r\n- What do you have to consider when compiling your NDK binaries?\r\n- How is this done in the latest Android Studio release. What are the defaults? What has to be considered by the developer?\r\n- What else can be done with the new ndk changes? 
(https://developer.android.com/about/versions/10/features#fg-service-types)\r\n\r\nRefs:\r\n- PIE enabled: http://vinsol.com/blog/2014/08/19/compiling-native-libraries-for-android-l/\r\n- https://code.google.com/archive/p/android-developer-preview/issues/888\r\n- http://web.guohuiwang.com/technical-notes/androidndk2", + "summary": "The issue suggests enhancing the section on \"Make Sure That Free Security Features Are Activated (MSTG-CODE-9)\" in the chapter about testing code quality and build settings for Android apps by adding content specifically focused on secure compiler settings for the Android NDK. Key points to address include considerations when compiling NDK binaries, the process in the latest Android Studio release, default settings, developer considerations, and additional capabilities introduced with recent NDK changes. References to relevant resources are provided for further guidance. \n\nTo proceed, the content should be researched and drafted to cover these aspects comprehensively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/889", @@ -26046,6 +26946,7 @@ "node_id": "MDU6SXNzdWUzNDc3NzAzMTQ=", "title": "Add section on Android enterprise", "body": "With android 8, 8.1 and 9, a lot of love is being put into android enterprise.\r\nLet's have a section on this & how to secure android for enterprise apps the right way!\r\n\r\nhttps://developer.android.com/work/versions/Android-8.0 & https://developer.android.com/preview/work", + "summary": "The issue proposes the addition of a section dedicated to Android enterprise, highlighting the improvements made in Android versions 8, 8.1, and 9. It emphasizes the importance of documenting best practices for securing Android applications intended for enterprise use. To address this, relevant resources from the Android developer documentation should be incorporated.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/940", @@ -26075,6 +26976,7 @@ "node_id": "MDU6SXNzdWUzNDc3NzA1Nzg=", "title": "Android Oreo change: broadcast limitations & background service limitations", "body": "With Android Oreo, you now have new broadcast (implicit intent) limitations, as well as limitations as a background service. This will influence how the rogue apps might be able to get to your intents.\r\n\r\ntime to update the MSTG on the android sections regarding broadcasts and (possibly) background services for platform interaction for Android:\r\n- [ ] what should we look for with broadcasts? (What is secure/insecure?)\r\n- [ ] what should we look for with broadcast-receivers? (What is secure/insecure?)\r\n- [ ] what about sticky broadcasts? (till when do they work? what to look for?)\r\n- [ ] what about intent receivers?\r\n- [ ] when is a broadcast receiver exported or not?\r\n\r\nTask: update section \"[Testing for Sensitive Functionality Exposure Through IPC (MSTG-PLATFORM-4)](https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05h-Testing-Platform-Interaction.md#testing-for-sensitive-functionality-exposure-through-ipc-mstg-platform-4)\" with these changes.", + "summary": "The issue addresses the need to update the MSTG documentation to reflect the new broadcast and background service limitations introduced in Android Oreo. Specific points of focus include identifying secure and insecure practices related to broadcasts, broadcast receivers, sticky broadcasts, intent receivers, and the conditions under which a broadcast receiver is exported. 
\n\nAction required: Revise the section \"[Testing for Sensitive Functionality Exposure Through IPC (MSTG-PLATFORM-4)](https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05h-Testing-Platform-Interaction.md#testing-for-sensitive-functionality-exposure-through-ipc-mstg-platform-4)\" to incorporate these updates.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/941", @@ -26105,6 +27007,7 @@ "node_id": "MDU6SXNzdWUzNDc3NzA5NzA=", "title": "Android Oreo and P update: autofill framework", "body": "With the autofill framework introduced in Android Oreo (and the change of the fact that An EditText now returns an Editable instead of a Char-array) we need to update our sections on input, autocomplete, etc. especially when it comes to handling confidential information.\r\n\r\nhttps://developer.android.com/guide/topics/text/autofill & https://developer.android.com/preview/features/autofill\r\n\r\nLet's update the general authentication chapter (or local.. and study the difference for iOS) to discuss autocomplete/autofill for passwords, 2FA codes.\r\nNext, make a short update on the data storage as well regarding autofilling secret information.", + "summary": "The issue discusses the need to update documentation and implementation related to the autofill framework introduced in Android Oreo and updated in Android P. Key areas for revision include input handling, autocomplete features, and the secure management of confidential information like passwords and 2FA codes. It suggests updating the general authentication chapter and reviewing data storage practices concerning autofilling sensitive data. Action points include revising the relevant sections and studying iOS differences where applicable.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/943", @@ -26136,6 +27039,7 @@ "node_id": "MDU6SXNzdWUzNDc3NzE0MTc=", "title": "Android Oreo updates: changes in how shared memory is handled", "body": "With Android Oreo, the way you can handle shared memory (Securely) has changed. time to udpate the MSTG! https://developer.android.com/about/versions/oreo/android-8.1 \r\nLet's check the platform interaction section & data storage section.", + "summary": "The issue highlights the need to update the MSTG to reflect changes in shared memory handling introduced in Android Oreo. Specifically, it suggests reviewing the platform interaction and data storage sections to ensure they are aligned with the new guidelines. It may be necessary to incorporate the updated practices for secure shared memory management as outlined in the Android documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/945", @@ -26151,8 +27055,8 @@ "repository": 1127, "assignees": [], "labels": [ - 205, - 208 + 208, + 205 ] } }, @@ -26165,6 +27069,7 @@ "node_id": "MDU6SXNzdWUzNDc3NzE5OTc=", "title": "Android P update: Android protected confirmation", "body": "A new security feature `Android Protected Confirmation` has arrived to Android P. This can help with non-repudiation and alike. Time to start a write up about it and see how we can best relate this to the (M)ASVS.. https://developer.android.com/preview/features/security\r\nWe can update local authentication and then see how we can formalize the reasoning for non-repudiation in both the masvs and mstg.\r\nNote: we will need an issue at the MASVS in which non-repudiation is covered for med/high risk transactions/actions. 
(including privacy, financial, etc.)\r\n", + "summary": "The issue discusses the introduction of the `Android Protected Confirmation` feature in Android P, which enhances security and non-repudiation. There is a proposal to create a write-up on this feature and its relevance to the (M)ASVS. The next steps include updating local authentication methods and formalizing the reasoning for non-repudiation within the MASVS and MSTG frameworks. Additionally, a separate issue should be created to address non-repudiation for medium to high-risk transactions and actions, covering areas like privacy and financial security.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/948", @@ -26194,6 +27099,7 @@ "node_id": "MDU6SXNzdWUzNDc3NzM0MjQ=", "title": "Android Oreo change: Install from unknown sources", "body": "You can now test whether the user is allowed to install unknown sources. This can help in checking the security posture of the device. It is a good idea to raise awareness with android developers to check whether there are fake apps in the appstore which should represent theirs and we should for L2 apps make the dev check whether unknown sources are allowed and if so, warned he user about this \r\nhttps://developer.android.com/about/versions/oreo/android-8.0-changes .", + "summary": "The issue discusses the changes in Android Oreo regarding the ability to check if a user is allowed to install apps from unknown sources. It emphasizes the importance of raising awareness among Android developers about the risks of fake apps in app stores. For Level 2 (L2) applications, it suggests that developers should verify if the installation from unknown sources is enabled and, if so, warn users about the potential security risks. Developers may need to implement checks and warnings in their applications to enhance user security.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/954", @@ -26224,6 +27130,7 @@ "node_id": "MDU6SXNzdWUzNDc3NzM4MDI=", "title": "Android Oreo update: change in classloaders & native app-wrapping", "body": "Quiet some stuff has changed for Android Oreo if it comes to classloaders and how native app-parts can be wrapped. Time to update the MSTG on this! https://developer.android.com/about/versions/oreo/android-8.0-changes\r\n\r\n", + "summary": "The issue discusses the need to update the MSTG (Mobile Security Testing Guide) to reflect the changes in classloaders and native app-wrapping introduced in Android Oreo. It references the official Android documentation outlining these changes. The suggested action is to review the updates in the Android Oreo version and incorporate relevant information into the MSTG.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/955", @@ -26250,6 +27157,7 @@ "node_id": "MDU6SXNzdWUzNjk5MjA0NjE=", "title": "Create testcase issues on data storage for Parse and CouchDB", "body": "In our data storage chapters (see 0x05d for Android and 0x06d for iOS), we miss writeups for Parse and CouchDB.\r\n\r\nAs this is a good first issue, we would like to invite anyone to start working on the following:\r\n\r\n- Explain how the storagemechanism works (so what does CouchDB and Parse do?) in the overview part. Explain what could go wrong: where do you have to pay attention to? 
Thing about how the storage can be overriden or extracted?\r\n- Explain in the static analysis part what to look for in terms of wrong and right code: what are signs that the technology is used? What are signs that it is used insecurely?\r\n- Explain in the dynamic analysis part what to look for in terms of wrong behavior: what should a pentester/developer check to see issues in the behavior of the app regarding the integraiton?", + "summary": "The issue highlights the need for comprehensive test case write-ups for Parse and CouchDB in the data storage chapters for both Android and iOS. It emphasizes the following key areas to be addressed:\n\n1. Overview of the storage mechanisms for both Parse and CouchDB, including potential pitfalls and security considerations.\n2. Static analysis guidelines detailing indicators of correct and incorrect code usage, as well as signs of insecure implementations.\n3. Dynamic analysis recommendations for pentesters and developers to identify behavioral issues related to the integration of these storage solutions.\n\nTo proceed, contributors are encouraged to develop detailed explanations and examples for each of these sections.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1041", @@ -26280,6 +27188,7 @@ "node_id": "MDU6SXNzdWUzNzIyNTc3NDA=", "title": "Clean Git Repo", "body": "**Describe the issue**\r\nThe github repo is using already 374MB. We should clean it:\r\n\r\nhttps://stackoverflow.com/questions/2100907/how-to-remove-delete-a-large-file-from-commit-history-in-git-repository", + "summary": "The GitHub repository has grown to 374MB, indicating the need for a cleanup to reduce its size. The issue suggests looking into methods for removing large files from the commit history, as referenced in a provided Stack Overflow link. To address this, it would be beneficial to follow the guidelines for cleaning up the repository by removing unnecessary large files.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1050", @@ -26308,6 +27217,7 @@ "node_id": "MDU6SXNzdWUzOTgyMDE4Mzg=", "title": "Add MSTG-RESILIENCE-7 and MSTG-RESILIENCE-8", "body": "Add 8.7 and 8.8 for android and ios: show how you can delay the attacker or report tampering to the backend as a response to a tamper detected\r\n8.7: The app implements multiple mechanisms in each defense category (8.1 to 8.6). Note that resiliency scales with the amount, diversity of the originality of the mechanisms used.\r\n8.8: The detection mechanisms trigger responses of different types, including delayed and stealthy responses.", + "summary": "The issue proposes the addition of two specific resilience guidelines, MSTG-RESILIENCE-7 and MSTG-RESILIENCE-8, along with sections 8.7 and 8.8 for both Android and iOS platforms. Section 8.7 emphasizes the implementation of multiple mechanisms across various defense categories to enhance resiliency, while section 8.8 focuses on the necessity for detection mechanisms to trigger diverse responses, including delayed and stealthy ones. It suggests detailing how to effectively delay an attacker or report tampering to the backend upon detection. 
Further work is needed to develop and integrate these guidelines into the existing framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1113", @@ -26323,8 +27233,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, 209, + 202, 211 ] } @@ -26338,6 +27248,7 @@ "node_id": "MDU6SXNzdWUzOTgyMTQwNDQ=", "title": "Add missing reverse engineering testcases for iOS", "body": "Add missing reverse engineering testcases for iOS given the following MASVS requirements:\r\n- [x] 8.4 The app detects, and responds to, the presence of widely used reverse engineering tools and frameworks on the device.\r\n- [x] 8.5 The app detects, and responds to, being run in an emulator.\r\n- [ ] 8.6 The app detects, and responds to, tampering the code and data in its own memory space.\r\n- [x] 8.9 Obfuscation is applied to programmatic defenses, which in turn impede de-obfuscation via dynamic analysis.\r\n- [ ] 8.11 All executable files and libraries belonging to the app are either encrypted on the file level and/or important code and data segments inside the executables are encrypted or packed. Trivial static analysis does not reveal important code or data.\r\n", + "summary": "The issue highlights the need to add missing reverse engineering test cases for iOS in accordance with specific MASVS requirements. While several requirements have been addressed, there are still two that need attention: \n\n1. The app's ability to detect and respond to tampering of code and data in its own memory space (8.6).\n2. Ensuring all executable files and libraries are encrypted or packed to protect against trivial static analysis revealing important code or data (8.11).\n\nTo move forward, the implementation of test cases for these two requirements is necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1116", @@ -26369,6 +27280,7 @@ "node_id": "MDU6SXNzdWU0MTQ1NDIyMjY=", "title": "[Android] Ways to detect Magisk", "body": "**Describe the issue**\r\nThe test cases for root detection do not seem to be able to detect the latest Magisk (18.1) with Magisk Hide and Hide Magisk Manager enabled. Are there any ways to detect this?", + "summary": "The issue discusses the inability of current root detection test cases to identify the latest version of Magisk (18.1) when both Magisk Hide and Hide Magisk Manager are activated. It suggests that there may be a need to explore or develop new methods for reliably detecting Magisk in such scenarios.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1146", @@ -26398,6 +27310,7 @@ "node_id": "MDU6SXNzdWU0MjM1OTgzMTk=", "title": "iOS 12 update: update on password management & credential/session sharing", "body": "given iOS 12, there are a few best practices that app developers should embrace, which we should test for to make sure that password generation, usage, etc. is done correctly. This should be part of platform integration. 
Some pointers:\r\n- https://developer.apple.com/documentation/authenticationservices/aswebauthenticationsession\r\n- https://developer.apple.com/documentation/security/password_autofill/about_the_password_autofill_workflow\r\n- https://developer.apple.com/documentation/security/shared_web_credentials\r\n- https://developer.apple.com/documentation/security/shared_web_credentials/managing_shared_credentials", + "summary": "The issue discusses the need for app developers to adopt best practices regarding password management and credential/session sharing in light of iOS 12 updates. It emphasizes the importance of testing these practices to ensure proper password generation and usage. Key resources are provided for developers to reference, including documentation on web authentication sessions, password autofill workflows, and managing shared credentials. The next steps involve integrating these practices into the platform and conducting thorough testing to verify compliance with the new guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1162", @@ -26413,8 +27326,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 211 + 211, + 207 ] } }, @@ -26427,6 +27340,7 @@ "node_id": "MDU6SXNzdWU0MjM2NTMwNzU=", "title": "iOS and Android: improve platform interaction on notifications", "body": "One thing we might want to add to the MSTG is that we should, optionally, try to limit the information shared through notifications when they have a high confidentiality.", + "summary": "The issue discusses the need to enhance platform interaction regarding notifications in iOS and Android applications. It suggests that the Mobile Security Testing Guide (MSTG) should incorporate guidelines to limit the information shared in notifications for data with high confidentiality. To address this, it may be necessary to review and update the relevant sections of the MSTG to include these recommendations.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1163", @@ -26445,9 +27359,9 @@ ], "labels": [ 202, + 211, 205, - 206, - 211 + 206 ] } }, @@ -26460,6 +27374,7 @@ "node_id": "MDU6SXNzdWU0MzEzNDI3NDU=", "title": "update oauth2 ", "body": "Enhance Oauth 2 recommendations based on evaluationing the following RFC: https://tools.ietf.org/html/rfc8252", + "summary": "The issue discusses the need to enhance OAuth 2 recommendations by evaluating RFC 8252. To address this, a review of the specified RFC should be conducted, followed by updates to the existing OAuth 2 documentation and guidelines to incorporate the necessary improvements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1174", @@ -26488,6 +27403,7 @@ "node_id": "MDU6SXNzdWU0MzUwMDE0Njg=", "title": "Update 0x05a-Platform-Overview regarding ART", "body": "**Describe the issue**\r\n\r\nCheck the content of 0x05a-Platform-Overview, and verify the explanation of ART and Dalvik (and remove if still there). Link to Android documentation wherever possible and reduce the explanation to a minimum. \r\n", + "summary": "The issue requests an update to the 0x05a-Platform-Overview section, specifically concerning the explanation of ART (Android Runtime) and Dalvik. It suggests verifying the current content, removing outdated references if necessary, and linking to the official Android documentation. The aim is to streamline the explanation to its essential points. 
\n\nTo address this, the content should be reviewed, unnecessary details removed, and relevant documentation links incorporated.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1181", @@ -26516,6 +27432,7 @@ "node_id": "MDU6SXNzdWU0NDI2MzU0NDc=", "title": "Slow down attack’s on Android keystore via side channel attacks MSTG‑STORAGE‑14", "body": "**Describe the issue**\r\nMitigating steps to address a new attack form NCC against Qualcomm backed key stores should be added to MSTG. \r\n\r\nhttps://www.nccgroup.trust/us/our-research/private-key-extraction-qualcomm-keystore/", + "summary": "The issue highlights the need to incorporate mitigation strategies for a new type of side-channel attack targeting Qualcomm-backed Android key stores into the MSTG documentation. It references research by NCC Group detailing these vulnerabilities. To address this, relevant updates and guidelines should be added to the MSTG to help developers protect against these attacks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1201", @@ -26531,8 +27448,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, - 208 + 208, + 202 ] } }, @@ -26545,6 +27462,7 @@ "node_id": "MDU6SXNzdWU0NzMzNTI2NDM=", "title": "5c - Create Information Gathering (Dynamic Instrumentation)", "body": "Create Tampering and RI -> Dynamic Instrumentation -> Information gathering\r\n\r\nafter \"Tampering and RI -> Dynamic Instrumentation -> Tooling\r\n\r\n```\r\n##### Information Gathering\r\n###### Getting Loaded Libraries\r\n###### Getting Loaded Classes and their Methods\r\n###### Getting Runtime Dependencies\r\n```\r\n\r\nThis section is intended to show a couple of cases where you can apply dynamic analysis for Information Gathering and present some of the things you can retrieve.", + "summary": "The issue discusses the need to develop a section on \"Information Gathering\" within the context of dynamic instrumentation. It outlines specific areas to focus on, including retrieving loaded libraries, loaded classes and their methods, and runtime dependencies. The goal is to illustrate how dynamic analysis can be applied for information gathering and to showcase the types of information that can be extracted. It suggests that further elaboration and examples are needed in this section to enhance understanding and practical application.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1364", @@ -26573,6 +27491,7 @@ "node_id": "MDU6SXNzdWU0NzcyOTcxODA=", "title": "Use of PKCE is recommended in 0x04e, but there appears to be no testcase for verifying if it's in place", "body": "**Platform:**\r\nBoth\r\n**Description:**\r\nWhen using the OAuth2 authorization code flow, code interception attacks are possible, which can be mitigated through implementing PKCE. A testcase for verifying the proper working of this mechanism would be good IMO. At the same time, this might be practically infeasible due to \r\n\r\nA) an additional app that registers the same protocol as the app under test to catch the callback URL, and\r\n\r\nB) the need for a server that implements OAuth and that we can legitimately advise readers to (ab)use for testing purposes.\r\n\r\nHappy to discuss further.", + "summary": "The issue highlights the absence of a test case for verifying the implementation of PKCE (Proof Key for Code Exchange) in the OAuth2 authorization code flow, which is recommended to prevent code interception attacks. 
The author notes challenges in creating such a test case, including the need for an additional app to register the same protocol and a compliant OAuth server for testing. Further discussion on this topic is encouraged. It may be beneficial to explore potential solutions or alternatives for implementing test cases that could validate PKCE functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1402", @@ -26603,6 +27522,7 @@ "node_id": "MDU6SXNzdWU0ODQyMjk0MDc=", "title": "[Android/iOS Tool] Dwarf", "body": "Platform:\r\nAndroid/iOS\r\n\r\nDescription:\r\nEvaluate Dwarf (http://www.giovanni-rocca.com/dwarf/) and check if it makes sense to add to MSTG. If it does add it to 0x05c/0x6c.", + "summary": "The issue involves evaluating the Dwarf tool for potential inclusion in the MSTG documentation. The task is to assess its relevance and determine if it should be added to sections 0x05c or 0x6c. If found suitable, it needs to be integrated accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1421", @@ -26631,6 +27551,7 @@ "node_id": "MDU6SXNzdWU0OTI3MTI0NTk=", "title": "Android Anti-Reversing Defenses: Testing File Integrity Checks (MSTG-RESILIENCE-3) requires updating", "body": "hi\r\n\r\nThere are few comments related to: https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05j-Testing-Resiliency-Against-Reverse-Engineering.md#testing-file-integrity-checks-mstg-resilience-3\r\n\r\n1. Google Play modify AndroidManifest.xml after application publishing - did you see such behaviour? \r\n\r\n2. Android App Bundles: From documentation - \"Google Play automatically generates and serves optimized APKs for each user’s device configuration\": https://developer.android.com/studio/build/configure-apk-splits\r\nLooks like checksum or hash for *.dex files is useless in this case.\r\n\r\n", + "summary": "The issue discusses the need for updates related to the testing of file integrity checks in Android applications, specifically in the context of the MSTG-RESILIENCE-3 guideline. Two main points are raised: \n\n1. There is a question about whether Google's modification of the AndroidManifest.xml after app publishing has been observed.\n2. Concerns are raised regarding the utility of checksums or hashes for *.dex files given that Google Play generates optimized APKs for different device configurations.\n\nTo address these points, further investigation is needed into the implications of these behaviors on integrity checks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1449", @@ -26659,6 +27580,7 @@ "node_id": "MDU6SXNzdWU0OTI5MjU4ODE=", "title": "[Android Tool] Dexcalibur", "body": "Evaluate Dexcalibur (https://github.com/FrenchYeti/dexcalibur/wiki/Gallery) and check if it makes sense to add to MSTG. If it does add it to 0x05b.\r\n\r\nhttps://www.youtube.com/watch?v=2dGoolvMEpI\r\n", + "summary": "The issue requests an evaluation of Dexcalibur, a tool available on GitHub, to determine its relevance for inclusion in the Mobile Security Testing Guide (MSTG). If deemed appropriate, the tool should be added to section 0x05b of the guide. 
A provided video link may offer additional insights into the tool's functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1450", @@ -26687,6 +27609,7 @@ "node_id": "MDU6SXNzdWU1MDI4MTU5ODQ=", "title": "Create testcase for MSTG-Storage-13 & MSTG‑STORAGE‑14", "body": "MSTG-Storage-13: No sensitive data should be stored locally on the mobile device. Instead, data should be retrieved from a remote endpoint when needed and only be kept in memory. \r\nand \r\nMSTG-Storage-14: If sensitive data is still required to be stored locally, it should be encrypted using a key derived from hardware backed storage which requires authentication.\r\nrequiers a testcase. This can be done with a generic extension of the memory tests at both the platforms as well as storage tests. in case encryption is then necessary, a reference to MSTG-Storage-1 testcase can be made.\r\nfor both android and ios", + "summary": "The issue discusses the need to create test cases for two specific security guidelines related to data storage on mobile devices: MSTG-Storage-13 and MSTG-Storage-14. MSTG-Storage-13 emphasizes that sensitive data should not be stored locally but instead retrieved from a remote endpoint, while MSTG-Storage-14 states that if local storage of sensitive data is necessary, it must be securely encrypted.\n\nIt suggests that the test cases can be derived from existing memory and storage tests applicable to both Android and iOS platforms. Additionally, if encryption is involved, a reference to the MSTG-Storage-1 test case should be included. The next step is to develop these test cases accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1484", @@ -26702,8 +27625,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, 208, + 202, 211 ] } @@ -26717,6 +27640,7 @@ "node_id": "MDU6SXNzdWU1MDI4MTYyMDU=", "title": "Create testcase for MSTG-Storage-15", "body": "We need a testcase for MSTG-STORAGE-15:\r\n\r\nThe app’s local storage should be wiped after an excessive number of failed authentication attempts.\r\n for both android and ios", + "summary": "A request has been made to create a test case for MSTG-Storage-15, which focuses on ensuring that the app's local storage is cleared after a specified number of failed authentication attempts. Test cases should be developed for both Android and iOS platforms to verify this functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1485", @@ -26732,8 +27656,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, 208, + 202, 211 ] } @@ -26747,6 +27671,7 @@ "node_id": "MDU6SXNzdWU1MDI4MTY2MTk=", "title": "Extend MSTG-CODE-4 testcases", "body": "The MSTG-Code-4 testcases should be extended to include checks for any other developer asissting code (backdoors, etc.).", + "summary": "The issue discusses the need to expand the existing MSTG-Code-4 test cases to incorporate checks for additional developer-assisted code, including potential backdoors. It suggests enhancing the test coverage to ensure a more thorough examination of code integrity. 
This could involve identifying specific areas where such code may be hidden and creating corresponding test scenarios.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1486", @@ -26777,6 +27702,7 @@ "node_id": "MDU6SXNzdWU1MDI4MzIwOTQ=", "title": "Extend webview testcases with MSTG-Platform-10", "body": "MSTG-Platform-10: A WebView's cache, storage, and loaded resources (JavaScript, etc.) should be cleared before the WebView is destroyed.\r\n\r\n- [X] Android https://github.com/OWASP/owasp-mstg/pull/1984\r\n- [ ] iOS TBD\r\n\r\n\r\nMaybe include to do the cleanup when a webview has hung? https://developer.android.com/about/versions/10/features#webview-hung\r\n\r\n\r\n## Research notes for iOS\r\n\r\nCheck WebView Config / init for params/functions preventing files from being stored in the first place\r\n\r\nOne option is to use [WKWebsiteDataStore.nonPersistent()](https://developer.apple.com/documentation/webkit/wkwebsitedatastore/1532934-nonpersistent). Example here:\r\n\r\nhttps://github.com/duckduckgo/iOS/blob/cb600c312c1c9884f72d0cffe00318dd0de9faab/Core/WKWebViewConfigurationExtension.swift\r\n\r\n\r\nThis is old code but check if it's good for whenever nonPersistent is not an option:\r\n\r\n```swift\r\n// https://gist.github.com/insidegui/4a5de215a920885e0f36294d51263a15\r\n\r\n HTTPCookieStorage.shared.removeCookies(since: Date.distantPast)\r\nWKWebsiteDataStore.default().fetchDataRecords(ofTypes: WKWebsiteDataStore.allWebsiteDataTypes()) { records in\r\n records.forEach { record in\r\n WKWebsiteDataStore.default().removeData(ofTypes: record.dataTypes, for: [record], completionHandler: {})\r\n print(\"Cookie ::: \\(record) deleted\")\r\n }\r\n}\r\n```\r\n\r\nMore useful linnks for the research:\r\n\r\n- https://github.com/mozilla-mobile/focus-ios/blob/main/Blockzilla/WebCacheUtils.swift\r\n- https://github.com/duckduckgo/iOS/blob/cb600c312c1c9884f72d0cffe00318dd0de9faab/Core/WebCacheManager.swift\r\n- https://github.com/mozilla-mobile/firefox-ios/blob/main/Client/Frontend/Settings/Clearables.swift\r\n", + "summary": "The issue highlights the need to extend WebView test cases in accordance with MSTG-Platform-10, which mandates that a WebView's cache, storage, and loaded resources should be cleared before the WebView is destroyed. While the Android implementation has been addressed, the iOS implementation is still pending.\n\nAdditionally, there's a suggestion to incorporate cleanup procedures when a WebView hangs, referencing Android documentation on handling such scenarios.\n\nFor iOS, research is suggested into WebView configuration and initialization parameters that prevent storage of files. One recommended approach is using `WKWebsiteDataStore.nonPersistent()`. 
If that option isn't suitable, the issue provides a code snippet for clearing cookies and website data.\n\nTo move forward, the iOS implementation needs to be developed, ensuring that the cleanup process adheres to the specified guidelines and incorporates any necessary research findings.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1487", @@ -26806,6 +27732,7 @@ "node_id": "MDU6SXNzdWU1MDI4MzI1MjQ=", "title": "Implement testcase for MSTG-PLATFORM-11", "body": "MSTG-PLATFORM-11: Verify that the app prevents usage of custom third-party keyboards whenever sensitive data is entered.", + "summary": "The issue requests the implementation of a test case for MSTG-PLATFORM-11, which focuses on ensuring that the application blocks the use of custom third-party keyboards when sensitive data is being entered. The task involves creating a test scenario to verify this functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1488", @@ -26835,6 +27762,7 @@ "node_id": "MDU6SXNzdWU1MDI5MjIwMzk=", "title": "Extend MSTG‑NETWORK‑3 with Certificate transparency for iOS and Android", "body": "Extend MSTG‑NETWORK‑3 with Certificate transparency for iOS and Android", + "summary": "The issue requests an extension of the MSTG-NETWORK-3 guideline to include support for Certificate Transparency for both iOS and Android platforms. It suggests that the current guidelines should be updated to address how to implement and utilize Certificate Transparency in mobile applications. To move forward, additional research and documentation regarding the integration of Certificate Transparency in the context of mobile app security may be necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1492", @@ -26863,6 +27791,7 @@ "node_id": "MDU6SXNzdWU1MDI5MjI0NjY=", "title": "[Android] Have a testcase for MSTG‑RESILIENCE‑13", "body": "We need a testcase for MSTG‑RESILIENCE‑13: As a defense in depth, next to having solid hardening of the communicating parties, application level payload encryption can be applied to further impede eavesdropping.", + "summary": "A request has been made to create a test case for MSTG‑RESILIENCE‑13, which focuses on implementing application-level payload encryption as an additional security measure to enhance protection against eavesdropping. The task involves developing a test case that verifies this encryption mechanism within the application.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1494", @@ -26878,8 +27807,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, - 209 + 209, + 202 ] } }, @@ -26892,6 +27821,7 @@ "node_id": "MDU6SXNzdWU1MDI5MjQ2ODA=", "title": "Open todo's for 0x05a Broadcast Receiver fix ", "body": "TODO:\r\n\r\n1. What does the programmatically declared receiver mean in terms of android:exported and android:enabled? How can we make sure that not everything can start this?\r\n - https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05h-Testing-Platform-Interaction.md#inspect-the-androidmanifest-2: update this regarding default values when setting a manifest-based receiver\r\n - https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05h-Testing-Platform-Interaction.md#inspect-the-source-code-2: add what is the default and that this has to be considered when reviewing the code\r\n\r\n2. 
Link to https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05h-Testing-Platform-Interaction.md#broadcast-permission-enforcement to show how security matters and update that section with the security considerations of https://developer.android.com/guide/components/broadcasts#security-and-best-practices", + "summary": "The issue outlines tasks related to addressing the handling of broadcast receivers in the documentation. Key points include clarifying the implications of programmatically declared receivers with respect to `android:exported` and `android:enabled` attributes, as well as ensuring that not all components can initiate these receivers. Specific updates are needed in the documentation regarding default values for manifest-based receivers and considerations for code reviews. Additionally, there is a need to enhance the section on broadcast permission enforcement by integrating security considerations from official Android guidelines. \n\nTo move forward, focus on updating the relevant documentation sections and adding necessary security insights.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1495", @@ -26920,6 +27850,7 @@ "node_id": "MDU6SXNzdWU1MDQyNTAxNjc=", "title": "Andriod 10 anti-tampering: run embedded uncompressed DEX directly", "body": "You can now run dEX code directly as anti-tampering control (see https://developer.android.com/about/versions/10/features#embedded-dex). this should be part of the anti-reverse engineering techniques", + "summary": "The issue discusses the implementation of running embedded uncompressed DEX files directly as a method of anti-tampering on Android 10. This approach is suggested as a part of anti-reverse engineering techniques. To address this issue, it may be necessary to integrate this feature into the existing anti-tampering controls within the application.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1506", @@ -26948,6 +27879,7 @@ "node_id": "MDU6SXNzdWU1MDQyNTU2NzY=", "title": "Update android 10 at storage: scoped storage (MSTG-STORAGE)", "body": "Update the android mstg on storage due to the introduction of scoped storage: https://developer.android.com/about/versions/10/features#create-files-external-storage", + "summary": "The issue requests an update to the Android MSTG regarding scoped storage, introduced in Android 10. The update should reflect the changes related to how files are created in external storage. To address this, the relevant documentation should be reviewed and revised to incorporate the new guidelines and practices introduced with scoped storage.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1508", @@ -26963,8 +27895,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, - 208 + 208, + 202 ] } }, @@ -26977,6 +27909,7 @@ "node_id": "MDU6SXNzdWU1MDQyNzAwNDI=", "title": "Have testcases for sign in with apple integration (MSTG-AUTH)", "body": "We should have testcases for signin with apple: https://developer.apple.com/sign-in-with-apple/", + "summary": "The issue highlights the need for the creation of test cases for the \"Sign in with Apple\" integration, following the guidelines provided by Apple. 
To address this, it is necessary to develop comprehensive test scenarios that ensure the functionality and security of the sign-in feature.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1511", @@ -26992,8 +27925,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 211 + 211, + 207 ] } }, @@ -27006,6 +27939,7 @@ "node_id": "MDU6SXNzdWU1MDQyNzIzNzg=", "title": "Create a testcase for not synchronizing confidential data using cloudkit", "body": "For more info on cloudkit: https://developer.apple.com/documentation/coredata/mirroring_a_core_data_store_with_cloudkit/", + "summary": "The issue suggests creating a test case to ensure that confidential data is not synchronized with CloudKit. It references the use of CloudKit in conjunction with Core Data for data mirroring. To address this, a test case should be developed that verifies the confidentiality of data during synchronization processes with CloudKit.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1513", @@ -27035,6 +27969,7 @@ "node_id": "MDU6SXNzdWU1MTQ2NDE4NDg=", "title": "migrate away crackme write-ups folder", "body": "migrate away crackme write-ups folder:\r\n- [ ] updrate crackme readme\r\n- [ ] migrate write ups to blogs with code attached", + "summary": "The issue discusses the need to migrate the \"crackme write-ups\" folder to a different format. Tasks outlined include updating the README for crackme and transferring the write-ups to a blog format, ensuring that the associated code is included. Action items should focus on both improving documentation and reformatting the content for better accessibility.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1541", @@ -27065,6 +28000,7 @@ "node_id": "MDU6SXNzdWU1MjI5NDcyNzk=", "title": "[Android Tool] VirtualXposed", "body": "VirtualXposed is a simple App based on VirtualApp and epic that allows you to use an Xposed Module without needing to root, unlock the bootloader, or flash a custom system image. (Supports Android 5.0~10.0)\r\n\r\nSee: https://github.com/android-hacker/VirtualXposed\r\n\r\nIt should be tested and evaluated to see if it's a nice fit to the MSTG tool set for Android. Similar to using Frida on non-rooted devices.\r\n", + "summary": "The issue discusses the need to test and evaluate VirtualXposed, an application that enables the use of Xposed Modules without rooting or unlocking the bootloader on Android devices (Android 5.0 to 10.0). The evaluation aims to determine if VirtualXposed can be effectively integrated into the MSTG tool set for Android, drawing a parallel to the usage of Frida on non-rooted devices. It is suggested that thorough testing be conducted to assess its compatibility and functionality within the existing tools.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1575", @@ -27093,6 +28029,7 @@ "node_id": "MDU6SXNzdWU1NDQ4MDI1NjE=", "title": "Enhance testing with Frida for non-jailbroken devices", "body": "**Describe the issue**\r\nRecent enhancements to the Frida open-source dynamic instrumentation toolkit greatly ease the process of conducting jailed testing. You no longer have to manually package the Frida Gadget in your target app. As long as the app is debuggable, Frida does that for you. This post will walk you through the process of using Frida on a jailed device.\r\n\r\nFrida version 12.7.12 and above include this improvement. 
Frida 12.8 is the most current version as of late December 2019.\r\nhttps://www.nowsecure.com/blog/2020/01/02/how-to-conduct-jailed-testing-with-frida/\r\n\r\n**Optional: Testcase or section**\r\nShould be added as another sub-section here: https://github.com/OWASP/owasp-mstg/blob/master/Document/0x06c-Reverse-Engineering-and-Tampering.md#dynamic-analysis-on-non-jailbroken-devices\r\n\r\n", + "summary": "The issue discusses the recent improvements in the Frida toolkit that simplify the process of conducting tests on non-jailbroken devices by automating the inclusion of the Frida Gadget in target apps. It highlights the need to enhance testing documentation to reflect these changes and suggests adding a subsection to the existing dynamic analysis section in the OWASP MSTG documentation. To address this, it would be beneficial to create a detailed guide on using Frida for jailed testing.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1624", @@ -27121,6 +28058,7 @@ "node_id": "MDU6SXNzdWU1NDg2MjI1ODE=", "title": "Double check Frida Detection", "body": "Verify which detection mechanisms are we already covering and which not:\r\n\r\nhttps://darvincitech.wordpress.com/2019/12/23/detect-frida-for-android/\r\nhttps://github.com/darvincisec/DetectFrida\r\n\r\nEvaluate if we should enhance the ones we already cover with some info from there. Or it'd be sufficient to just link to this page/repo as a useful resource.\r\n\r\nVerify if we have custom code and update linking to the code in the repo. Maybe even link to the exact lines.\r\n\r\na. Detect through named pipes used by Frida\r\nb. Detect through frida specific named thread\r\nc. Compare text section in memory with text section in disk", + "summary": "The issue involves a review of existing Frida detection mechanisms and identifying gaps in coverage. The process includes evaluating current detection methods against resources provided, such as articles and GitHub repositories. The goal is to determine whether to enhance existing code with new information or simply reference these resources. Additionally, there is a need to verify and update links to custom code in the repository, potentially providing direct links to specific lines. Key detection methods to focus on include named pipes, specific named threads, and memory comparison between text sections.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1629", @@ -27149,6 +28087,7 @@ "node_id": "MDU6SXNzdWU1NTA0NjE0ODE=", "title": "iOS level 2 crackme issues", "body": "**Describe the bug**\r\nThere are issues with the level 2 iOS crackme. \r\n\r\nI have already opened an issue on the main repository https://github.com/commjoen/uncrackable_app/issues/10 \r\n\r\nI am putting this out there just in case there is someone who is stuck with the challenge.\r\n\r\nThe spoiler free tldr is:\r\n- don't analyze the 64 bit version, the challenge is not cross platform at the moment. Due to some quirkiness, analyzing the 64 bit version can mislead you\r\n- you should not be able to solve this with dynamic analysis\r\n- you should not be able to find the expected password even statically, but you can recover the logic and then check that you were correct with the sources\r\n\r\nI say should because for unintended reasons the challenge could be impossible to solve, not because it is hard to solve it.\r\n\r\n**crackme or other challenge**\r\niOS crackme level 2, is a simple crackme with anti debugging checks. 
One of these checks is not working correctly\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install the app\r\n2. Copy the intended solution from the https://github.com/commjoen/uncrackable_app or from the writeup\r\n3. The solution should fail the validation\r\n\r\n**Expected behavior**\r\nThe solution should be accepted\r\n\r\n**Screenshots**\r\nUnfortunately I don't have an iDevice to test on\r\n\r\n**Additional context**\r\nGhidra 9.1\r\nThe app for different reason should not work neither on 32 bit devices nor on 64 bit devices.\r\n", + "summary": "There are issues reported with the level 2 iOS crackme, particularly regarding the anti-debugging checks which are not functioning correctly. The challenge is currently not cross-platform, and analyzing the 64-bit version may lead to misleading results. It is suggested that solving the challenge through dynamic analysis is not feasible, and even static analysis may not yield the expected password. The expected behavior is that the provided solution should be accepted, but it fails validation when tested. To address this, further investigation into the anti-debugging checks and the compatibility of the app on different architectures may be needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1634", @@ -27177,6 +28116,7 @@ "node_id": "MDU6SXNzdWU1NTc0ODQ1ODQ=", "title": "Non-Proxy Aware Apps DNS method addition", "body": "The MSTG currently deals with non-proxy aware apps using one of the two methods:\r\n\r\n- iptables\r\n- ARP spoofing\r\n\r\nI performed a pentest using DNS hosts file method explained below. The root free version related to DNS is DNS spoofing. Let me know what you think about this potential addition to the MSTG. \r\n\r\n**Test setup:**\r\nTested on Genymotion Android 8.0 - API 26\r\n -- Applied Genymotion ARM Translation for 8.0\r\nApp tested: ORTEC Employee Self Service (Xamarin, no certificate pinning, root cert accepted)\r\n\r\n**Description hosts file method:**\r\nAdd root Burp CA (see MSTG)\r\nGenymotion: Network mode NAT\r\n\r\nSet-up a Burp listener:\r\n```\r\nProxy -> Options -> Proxy Listeners -> Add\r\nBind to port: 443 (if you want to intercept a different port choose something else)\r\nSpecific address: 192.168.56.1 (Genymotion NAT)\r\nRequest handling tab -> support invisible proxying enabled\r\n```\r\n\r\nRun TCPdump to discover the outgoing DNS requests:\r\n```\r\ntcpdump -i wlan0 port 53\r\n```\r\nEdit /etc/hosts using adb shell:\r\n```\r\nvi /etc/hosts\r\n192.168.56.1 example.com\r\n```\r\nTo test the set-up, start for example the ORTEC ESS app and fill in the domain example.com. You should now be able to intercept all the domains you added to the hosts file.\r\n\r\n**Description DNS spoofing method:**\r\nFor some reason Bettercap DNS spoofing doesn't really work to well for me. But here is how it can be done.\r\n\r\nGenymotion: Network mode Bridged (eth0 for example)\r\nAndroid Wi-Fi settings: set gateway to the Burp listener IP. 
(unsure about this, Bettercap seems to work randomly with and without a static gateway)\r\n\r\nSet-up a Burp listener:\r\n```\r\nProxy -> Options -> Proxy Listeners -> Add\r\nBind to port: 443 (if you want to intercept a different port choose something else)\r\nSpecific address: 192.168.83.51 (eth0 IP)\r\nRequest handling tab -> support invisible proxying enabled\r\n```\r\nRun TCPdump to discover the outgoing DNS requests:\r\n```\r\ntcpdump -i wlan0 port 53\r\n```\r\nStart bettercap and execute the following one-liner:\r\n```\r\nset dns.spoof.address 192.168.83.51; set dns.spoof.domains example.com; dns.spoof on\r\n```\r\nStart the ORTEC ESS app and fill in the domain example.com. You should be able to intercept the request!", + "summary": "The issue discusses the addition of a new DNS method for handling non-proxy aware applications in the MSTG. Currently, it covers two methods: iptables and ARP spoofing. The author proposes incorporating a DNS hosts file method and a DNS spoofing method based on their pentesting experience with an Android app.\n\nThe DNS hosts file method involves setting up a Burp listener and modifying the `/etc/hosts` file to intercept outgoing DNS requests. In contrast, the DNS spoofing method utilizes Bettercap to spoof DNS requests while the device is in bridged network mode.\n\nTo enhance the MSTG, it is suggested that the community considers these methods, particularly focusing on their effectiveness and reliability. Implementing these techniques could improve the toolkit for pentesters working with non-proxy aware applications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1645", @@ -27206,6 +28146,7 @@ "node_id": "MDU6SXNzdWU1NTg2OTEwMzg=", "title": "[Tool Android] GDA Android Reversing Tool", "body": "**Describe the issue**\r\nMaybe it is worth to take a look and investigate this tool: https://www.kitploit.com/2020/02/gda-android-reversing-tool-new.html\r\nI think we haven't mentioned it anywhere in MSTG.\r\n\r\n\r\n\r\n", + "summary": "The issue raises a suggestion to investigate the GDA Android Reversing Tool, which has not been mentioned in the MSTG documentation. It is recommended to review the tool and consider its relevance or potential inclusion in the project.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1647", @@ -27235,6 +28176,7 @@ "node_id": "MDU6SXNzdWU1NzAzNzUwODQ=", "title": "Encrypted Shared Preferences not covered in MSTG-STORAGE-1 and MSTG-STORAGE-2", "body": "MSTG-STORAGE-1 and MSTG-STORAGE-2 lists proper usage of Shared Preferences, but doesn't provide guidelines for the new Encrypted Shared Preferences, which automatically encrypt the contents of the XML file.\r\n", + "summary": "The issue highlights that the current guidelines for Shared Preferences in MSTG-STORAGE-1 and MSTG-STORAGE-2 do not address Encrypted Shared Preferences, which automatically encrypts XML file contents. 
It suggests that the documentation should be updated to include best practices and guidelines specifically for using Encrypted Shared Preferences.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1662", @@ -27250,8 +28192,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, - 208 + 208, + 202 ] } }, @@ -27264,6 +28206,7 @@ "node_id": "MDU6SXNzdWU1OTc3OTY1NDI=", "title": "[iOS] Have a testcase for MSTG‑RESILIENCE‑13", "body": "**Platform:**\r\niOS\r\n\r\n**Description:**\r\nhttps://github.com/OWASP/owasp-mstg/issues/1494\r\n", + "summary": "The issue requests the creation of a test case for MSTG-RESILIENCE-13 specifically for the iOS platform. There is a reference to another GitHub issue for additional context. To address this, a test case that aligns with the requirements of MSTG-RESILIENCE-13 needs to be developed and documented.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1688", @@ -27293,6 +28236,7 @@ "node_id": "MDU6SXNzdWU1OTc5NzI5MTg=", "title": "Android crypto next level (MSTG-CRYPTO-1, MSTG-CRYPTO-2, MSTG-CRYPTO-3 and MSTG-CRYPTO-5)", "body": "**Describe the issue**\r\nWe need extend / describe more following topics sections:\r\n- [ ] Dynamic Analysis for MSTG-CRYPTO-01\r\n- [ ] Dynamic Analysis for MSTG-CRYPTO-2, MSTG-CRYPTO-3 and MSTG-CRYPTO-4\r\n- [ ] Dynamic Analysis for MSTG-CRYPTO-05\r\n- [ ] Key Rotation and key lifecycle management\r\n- [ ] GCMParameterSpec is only addressed a bit in anti-reverse engineering, while this needs more love in crypto (e.g.: no nonce reuse, limits of application, usage of associated data, etc.)\r\n- [ ] Signing needs more love (RSA-PSS as prefered over the older PKCS1.5 padding) due to padding oracle attacks\r\n- [ ] padding of RSA\r\n- [ ] elliptic curve (EC) usage\r\n- [ ] check for an alternative crypto libraries when the NDK is used instead of the SDK\r\n- [ ] verify use of BCFKS (bouncy castle FIPS implementation), discuss recommendation of Sponge Castel as it is not updated for some time.\r\n\r\nYou can assign me to that ticket (as more and more topics show off when I'm working on that).\r\n", + "summary": "The issue highlights the need to expand and detail various topics related to Android cryptography, specifically focusing on the following areas:\n\n- Dynamic analysis for several MSTG-CRYPTO categories\n- Key rotation and lifecycle management\n- More comprehensive coverage of GCMParameterSpec, including concerns like nonce reuse and associated data\n- Enhanced discussion on signing methods, favoring RSA-PSS over PKCS1.5 to mitigate padding oracle attacks\n- Addressing padding in RSA\n- Clarifying elliptic curve usage\n- Evaluating alternative cryptographic libraries when using the NDK\n- Verification of BCFKS usage and recommendations regarding the Sponge Castle library\n\nTo address this issue, it will be necessary to conduct thorough research and provide detailed explanations for each of these topics.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1689", @@ -27323,6 +28267,7 @@ "node_id": "MDU6SXNzdWU2MDkxNzAyNDc=", "title": "Warning message and/or preventing screenshot for personal data sensitive applications", "body": "**Platform:**\r\niOS, Android\r\n\r\n**Description:**\r\nThe current MSTG test cases for the screenshot on mobile devices state and restrains the screenshots for application when it is in the background. 
However, for applications that are sensitive to personal or financial data, this test case does not apply. \r\n\r\nA warning message for such applications when a screenshot is triggered would also bring users aware of such activities as well as acknowledge to consent that this action is done by the user itself. In which case, it would protect the app developer or the institution that it belongs to that if in such case the screenshot or data of this nature is leaked by malicious or other methods, the developer and the institution are not liable for such data leakage.\r\n\r\nOr in other methods, preventing this action within the app itself could also be enforced with financial or apps that are sensitive to personal information.\r\n\r\nIn sum, this would allow for a more secure operating environment for the users when using the app, as well as prevent malicious codes from running or capturing sensitive data from devices while the user is operating.\r\n\r\nPlease take under consideration for an additional test case on top of MSTG-STORAGE-9 to further improve on screenshot limitations. Thanks.", + "summary": "The issue discusses the need for enhanced security measures regarding screenshots in mobile applications that handle sensitive personal or financial data. The current MSTG test cases do not adequately address this concern, as they only restrict screenshots when the app is in the background. The suggestion is to either implement a warning message when a screenshot is taken or to entirely prevent the action within the app. This would not only raise user awareness but also help protect developers and institutions from potential liability in case of data leaks. The issue proposes adding a new test case to complement MSTG-STORAGE-9 to improve screenshot limitations for sensitive applications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1709", @@ -27355,6 +28300,7 @@ "node_id": "MDU6SXNzdWU2MTIxMDI4Mjc=", "title": "Android quality code next level (MSTG-CODE-5)", "body": "**Describe the issue**\r\nWe need extend / describe more following topics sections:\r\n- [ ] Check [Android patch level](https://developer.android.com/reference/android/os/Build.VERSION#SECURITY_PATCH) - operating on up-to-date system is a good thing ;)\r\n- [ ] Check that security provider is up-to-date (mentioned in Crypto, and it also applies to MSTG-CODE-5)\r\n\r\nYou can assign me to that ticket.", + "summary": "The issue involves the need to enhance documentation on certain topics related to Android security and quality code. Specifically, it suggests extending the sections on checking the Android patch level and ensuring that the security provider is up-to-date. To address this, further elaboration and details about these topics are required. It is implied that someone can take on the task to work on these enhancements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1713", @@ -27386,6 +28332,7 @@ "node_id": "MDU6SXNzdWU2MTMyMzE4NTM=", "title": "Cleanup in-chapter write-up-like content", "body": "Since we decided not to include write-ups it might be a good idea to do some clean up. Also because some information isn't actual anymore (e.g. from 3-4 years ago).\r\n\r\nWe can start by reducing/partially removing content and source code that's 1-1 copy of online articles/documents (approx. 300 lines). 
**Leaving proper summaries/highlights and references**.\r\n\r\nFor instance, in [this article](https://www.vantagepoint.sg/blog/82-hooking-android-system-calls-for-pleasure-and-benefit) we can find all word-by word- from section \"Customizing Android for Reverse Engineering\". \r\n\r\n- Building the Kernel\r\n- Booting Your New Kernel\r\n- Syscall Hooking using Kernel Modules + Patching the System Call Table\r\n- Example: File Hiding\r\n\r\n> Here we (and the article) is talking about this being applied to current Android versions such as Android 5/6. We cannot guarantee that this is still valid, therefore it might be a good idea to keep the references including publication dates, if not we will have to update and maintain this for each Android Version.\r\n\r\nWe could also summarize \"System Call Hooking with Kernel Modules\" and move that right before \"##### Process Exploration\" which is a better location for that. The more detailed info + examples can be found in the [online article](https://www.vantagepoint.sg/blog/82-hooking-android-system-calls-for-pleasure-and-benefit) and [documents](https://dl.packetstormsecurity.net/papers/general/HITB_Hacking_Soft_Tokens_v1.2.pdf).\r\n", + "summary": "The issue discusses the need to clean up content that resembles write-ups due to the decision not to include such material. There is a particular focus on outdated information that directly copies online articles, suggesting that approximately 300 lines of content and source code should be reduced or removed while retaining proper summaries, highlights, and references.\n\nSpecific examples of sections that require attention are provided, including \"Building the Kernel,\" \"Booting Your New Kernel,\" and \"Syscall Hooking using Kernel Modules + Patching the System Call Table,\" which contain outdated or potentially invalid information regarding current Android versions. It is recommended to include publication dates in references or to regularly update the content for each Android version.\n\nAdditionally, there is a suggestion to summarize \"System Call Hooking with Kernel Modules\" and reposition it for better context within the document. Overall, the task involves cleaning up outdated and duplicated content, ensuring that references are accurate, and reorganizing information for clarity.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1717", @@ -27414,6 +28361,7 @@ "node_id": "MDU6SXNzdWU2MjQwNDU2OTY=", "title": "Add a test case to check for sensitive information hardcoded", "body": "**Platform:**\r\nAndroid, iOS\r\n**Description:**\r\nThis test case includes detecting the following issues from the source code of the app:\r\n- Hard embedded sensitive information, for example: MSTG mentioned hard-coding encryption key in the source code at MSTG-CRYPTO-05, but developers can also embed other types of sensitive information like authentication information, sensitive tokens, app secret keys, etc.\r\n- The information about the back end infrastructure that is vulnerable: for example: hidden back-end endpoints, The URL leads to the Google Firebase database which is misconfigured.", + "summary": "The issue proposes the addition of a test case aimed at detecting hardcoded sensitive information in the source code of both Android and iOS applications. 
This includes identifying embedded encryption keys, authentication details, sensitive tokens, and app secret keys, as well as uncovering potentially vulnerable backend infrastructure, such as misconfigured URLs leading to databases like Google Firebase. To address this, a comprehensive test case should be developed to systematically check for these types of sensitive information in the codebase.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1752", @@ -27442,6 +28390,7 @@ "node_id": "MDU6SXNzdWU2MzExNjQ3NzQ=", "title": "iOS DeviceCheck to detect tampering", "body": "**iOS:**\r\n\r\n**Description:**\r\nI think Apple's DeviceCheck API can be used to mitigate app tampering. I can imagine there are use cases where you want to prevent against copy-cat or otherwise re-signed clients.\r\n\r\nImage using https://developer.apple.com/documentation/devicecheck/dcdevice/2902276-generatetoken to obtain a Token on device and verify-ing it with a Device Validation Request as described on https://developer.apple.com/documentation/devicecheck/accessing_and_modifying_per-device_data\r\n\r\nOnly apps that provide a valid token are allowed to receive some key piece of information that is required later on in the interaction between the app and it's server. The key piece of info could be a unique nonce or even the private key of a server generated key pair allowing follow up requests to be signed, thus indicating by delegation that the app is in fact the one true app as intended by its creator.\r\n", + "summary": "The issue suggests utilizing Apple's DeviceCheck API to enhance app security by detecting and preventing tampering, such as re-signed or copy-cat clients. It proposes generating a token on the device and validating it through a Device Validation Request. Only apps that present a valid token would gain access to crucial information, like a unique nonce or a server-generated private key, enabling secure communication with the server. \n\nTo move forward, implementation of the token generation and validation process is recommended to safeguard against unauthorized app usage.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1767", @@ -27472,6 +28421,7 @@ "node_id": "MDU6SXNzdWU3MDI3NDIwODY=", "title": "Review existing Source Code ", "body": "**Describe the issue**\r\nThere are a lot of inconstancies in the MSTG (old Swift, Java Code etc.) that might be outdated. Source code should be reviewed and redundancies and outdated code should be removed.\r\n\r\nAlso code which only describes how something is implemented or best practices should be removed and only a link to the original documentation should be used. ", + "summary": "The issue highlights inconsistencies in the MSTG, including outdated Swift and Java code. It calls for a review of the source code to eliminate redundancies and outdated content. Furthermore, it suggests removing code that merely explains implementations or best practices, replacing it with links to the original documentation. 
The next steps involve conducting a thorough code review and implementing the necessary changes to streamline the content.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1818", @@ -27500,6 +28450,7 @@ "node_id": "MDU6SXNzdWU3MDM1NTY3MTA=", "title": "MSTG-Platform-11 is missing as test case ", "body": "**Platform:**\r\niOS \r\n\r\n**Description:**\r\n\r\nSee also https://github.com/OWASP/owasp-masvs/issues/488\r\n\r\nRequirement in https://github.com/OWASP/owasp-masvs/blob/master/Document/0x11-V6-Interaction_with_the_environment.md could be changed to:\r\n\r\n6.11 | MSTG-PLATFORM-11 | Verify that the app prevents usage of custom third-party keyboards whenever sensitive data is entered (iOS only). |   | ✓\r\n-- | -- | -- | -- | --\r\n\r\n\r\n", + "summary": "The issue highlights the absence of MSTG-Platform-11 as a test case for iOS, specifically regarding the prevention of custom third-party keyboards when entering sensitive data. It suggests a modification to the existing requirement by including this test case in the specified documentation. To address this issue, the requirement needs to be updated to reflect the inclusion of MSTG-Platform-11.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1820", @@ -27528,6 +28479,7 @@ "node_id": "MDU6SXNzdWU3MDkxOTYzMjY=", "title": "[Tool] LibScout", "body": "What about mentioning https://github.com/reddr/LibScout in 0x05i -> ## Checking for Weaknesses in Third Party Libraries (MSTG-CODE-5) ?\r\n\r\nLet's verify its effectiveness first before including it.", + "summary": "The issue suggests considering the inclusion of the LibScout tool in the section related to checking for weaknesses in third-party libraries. However, it also emphasizes the need to first verify the tool's effectiveness before making any additions. It would be beneficial to conduct a review or assessment of LibScout's performance in this context.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1823", @@ -27557,6 +28509,7 @@ "node_id": "MDU6SXNzdWU3MTc5MzU3NjM=", "title": "Objection - more consistence in the guide", "body": "**Describe the issue**\r\n\r\nSimiliar to Frida (https://mobile-security.gitbook.io/mobile-security-testing-guide/general-mobile-app-testing-guide/0x04c-tampering-and-reverse-engineering#frida) objection should be described in the generic tampering section and then only what is relevant for iOS and Andorid in the specific chapters to avoid redundancies and link to it. \r\n\r\nObjection is described here:\r\n- https://mobile-security.gitbook.io/mobile-security-testing-guide/android-testing-guide/0x05b-basic-security_testing \r\n- https://mobile-security.gitbook.io/mobile-security-testing-guide/ios-testing-guide/0x06b-basic-security-testing#objection\r\n\r\nWhatever is redundant should be moved to here, for example after Frida https://mobile-security.gitbook.io/mobile-security-testing-guide/general-mobile-app-testing-guide/0x04c-tampering-and-reverse-engineering#frida. \r\n\r\n\r\n**Optional: Testcase or section**\r\nDescribe the testcase or section which has the issue.\r\n\r\n**Optional: Additional context**\r\nAdd any other context about the issue here.\r\n", + "summary": "The issue highlights the need for consistency in the documentation regarding the tool \"Objection\" within the mobile security testing guide. 
The author suggests that Objection should be introduced in the general tampering section, similar to how Frida is handled, and then specific details for iOS and Android should be provided in their respective chapters. This approach aims to eliminate redundancy across the guides. Redundant content should be consolidated and linked appropriately to improve clarity and organization. \n\nTo address this, the documentation should be reviewed to identify and relocate repetitive information regarding Objection to a central location in the general tampering section.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1838", @@ -27583,6 +28536,7 @@ "node_id": "MDU6SXNzdWU3MTgzNDU5ODU=", "title": "0x05d Refactor - Remove Info already available Online", "body": "Do after closing #1840 !\r\n\r\nReview the new Overview and remove all information that can be already found online. For instance:\r\nThe section for \"Key Attestation\" has a lot of information, most of it addresing the developers. For that there is the official site: \r\n[Key Attestation](https://developer.android.com/training/articles/security-key-attestation \"Key Attestation\"). It should be evaluated if we are really providing added value here **for the tester**. If not, it should be enough to read that site.", + "summary": "The issue suggests a refactor to streamline documentation by removing information that is already accessible online, particularly focusing on sections like \"Key Attestation.\" It emphasizes the need to evaluate whether the content offers added value for testers, and if not, it may be sufficient to direct users to the official resources instead. The task should be undertaken after closing a related issue (#1840).", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1841", @@ -27612,6 +28566,7 @@ "node_id": "MDU6SXNzdWU3Mjg5NDA2MTc=", "title": "0x05h - Add Android 11 Changes and update android 10", "body": "The Testing App Permissions section at the start mentions changes in 8/9/10(beta) but not 11.\r\nShould implement the following:\r\n- Add android 11 changes to list\r\n- Change Android 10 changes from beta to release(and verify the information is still correct for release)\r\n", + "summary": "The issue highlights the need to update the Testing App Permissions section by including the changes related to Android 11. Additionally, it calls for the modification of Android 10 changes from beta to release status, ensuring that the information provided is accurate for the release version. To address this, the relevant updates for Android 11 must be added, and the Android 10 information needs verification and adjustment.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1848", @@ -27638,6 +28593,7 @@ "node_id": "MDU6SXNzdWU3MzM2MjE5NDQ=", "title": "0x05d - Expand firebase and add scoped storage", "body": "Once done with refactor it is probably worthwhile going into this section and adding a test case for online databases like firebase as the current mention is quite brief and not really in the right section (local storage MSTG-PLATFORM-1).\r\n\r\nFurthermore it would be worth while adding in scoped storage and the changes that brings to data storage.", + "summary": "The issue discusses the need to expand the documentation regarding Firebase and scoped storage. It highlights that the current information on online databases is insufficient and misplaced under local storage. 
Additionally, it suggests incorporating details about scoped storage and its implications for data storage. The recommendation is to enhance the documentation by adding relevant test cases and updated information on these topics.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1853", @@ -27653,9 +28609,9 @@ "repository": 1127, "assignees": [], "labels": [ + 208, 202, - 206, - 208 + 206 ] } }, @@ -27668,6 +28624,7 @@ "node_id": "MDU6SXNzdWU3NjAzMTc2MDA=", "title": "Testing for setWebContentsDebuggingEnabled() is Enabled", "body": "**Platform:**\r\nAndroid\r\n**Description:**\r\nI think it would be nice to add a testcase for checking that `setWebContentsDebuggingEnabled()` is enabled or not.\r\n[Reference 1(blog.compass-security.com)](https://blog.compass-security.com/2019/10/introducing-web-vulnerabilities-into-native-apps)\r\n[Reference 2 (dev.to)](https://dev.to/ashishb/android-security-don-t-leave-webview-debugging-enabled-in-production-5fo9)", + "summary": "The issue suggests the addition of a test case to verify whether `setWebContentsDebuggingEnabled()` is enabled in an Android application. This is important for ensuring security, as leaving web view debugging enabled in production can expose vulnerabilities. It may be beneficial to implement a test that checks the status of this setting to enhance the application's security posture.", "state": "open", "state_reason": "reopened", "url": "https://github.com/OWASP/owasp-mastg/issues/1867", @@ -27696,6 +28653,7 @@ "node_id": "MDU6SXNzdWU4MzU4OTczMjU=", "title": "[Tool] strongarm Static Analyzer", "body": "Try and evaluate for integration to the MSTG.\n\nhttps://github.com/datatheorem/strongarm", + "summary": "The issue suggests evaluating the strongarm Static Analyzer for potential integration into the MSTG framework. It implies a need for assessment of its capabilities and compatibility with existing systems. A thorough analysis and testing of the tool's features and performance may be required to determine how it could enhance MSTG.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1881", @@ -27725,6 +28683,7 @@ "node_id": "MDU6SXNzdWU4NDM0OTE1MzU=", "title": "Reading system log with socat does not work (anymore?)", "body": "As described here: https://github.com/OWASP/owasp-mstg/blob/master/Document/0x06b-Basic-Security-Testing.md\r\n\r\nThere you find the following command:\r\n\r\n`iPhone:~ root# socat - UNIX-CONNECT:/var/run/lockdown/syslog.sock`\r\n\r\nI tried it on my iPhone (iOS 14.2) but I get always the following error message: `no such file or directory`.\r\n\r\nMaybe it does not working anymore on iOS 14.2? Or I am doing something wrong?", + "summary": "The issue reports that using the `socat` command to read the system log from `syslog.sock` on an iPhone running iOS 14.2 results in a \"no such file or directory\" error. It is suggested that this method may no longer be functional on this iOS version, but it is unclear if there might be an error in the execution. 
Further investigation into the current compatibility of this command with iOS 14.2 and any alternative methods for accessing the system log may be needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1886", @@ -27740,8 +28699,8 @@ "repository": 1127, "assignees": [], "labels": [ - 211, - 216 + 216, + 211 ] } }, @@ -27754,6 +28713,7 @@ "node_id": "MDU6SXNzdWU4NDk5MjgzNDg=", "title": "0x5d - Android AccountManager not yet addressed", "body": "Security implications? when to use it instead of the Keystore?\r\n- https://developer.android.com/training/articles/security-tips.html#Credentials\r\n- https://developer.android.com/training/id-auth/authenticate.html", + "summary": "The issue discusses the lack of attention to the Android AccountManager, particularly regarding its security implications and the appropriate scenarios for its use compared to the Keystore. The discussion references Android's official documentation for security best practices and authentication methods. To resolve this issue, a deeper analysis of the AccountManager's security features and guidelines for its proper implementation might be necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1890", @@ -27783,6 +28743,7 @@ "node_id": "MDU6SXNzdWU4NTUxMDA1NDQ=", "title": "[Tool] Android Static Analyzers", "body": "Try and evaluate for integration to the MASTG.\r\n\r\nhttps://github.com/detekt/detekt\r\nhttps://github.com/find-sec-bugs/find-sec-bugs\r\n\r\nKeep an eye on kotlin final support by https://github.com/semgrep/semgrep", + "summary": "The issue suggests evaluating Android static analyzers for potential integration into the MASTG. Specifically, it points to Detekt and Find Security Bugs as candidates. Additionally, it mentions the need to monitor Kotlin final support from Semgrep. To move forward, a review of these tools for compatibility and effectiveness in enhancing security analysis for Android projects is necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1895", @@ -27811,6 +28772,7 @@ "node_id": "MDU6SXNzdWU4ODE4NDAwNjc=", "title": "[Android] Anti-debug and Anti-memory dump techniques", "body": "Can you check if some of the below techniques taken from [here](https://github.com/darvincisec/AntiDebugandMemoryDump)\r\ncan be included ?\r\n1. Check for JDWP string in /proc/self/task/comm as an indication of app is debuggable\r\n2. Use of inotify to detect memory dump", + "summary": "The issue discusses the potential inclusion of anti-debug and anti-memory dump techniques in an Android project. Specifically, it suggests checking for the JDWP string in the `/proc/self/task/comm` directory to determine if the app is debuggable and utilizing inotify to detect memory dumps. It may be beneficial to implement these techniques to enhance the app's security against debugging and memory extraction.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1907", @@ -27839,6 +28801,7 @@ "node_id": "MDU6SXNzdWU5MjEwNTQ5ODY=", "title": "Review iOS Secure Data Storage options", "body": "- use SQLCipher? see: https://github.com/corona-warn-app/cwa-app-ios/blob/release/2.5.x/src/xcode/ENA/ENA/Source/Workers/SQLiteKeyValueStore.swift\r\n- review suggestion for encrypted CoreData. 
\r\n- https://cocoacasts.com/is-core-data-encrypted\r\n- https://www.cuelogic.com/blog/a-guide-to-securing-your-core-data-on-an-ios-device", + "summary": "The issue highlights the need to review iOS secure data storage options, specifically considering the use of SQLCipher and evaluating the possibilities for encrypted CoreData. Relevant resources have been provided for further investigation into these topics. To address this issue, a thorough assessment of these storage methods and their implementations should be conducted.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1912", @@ -27868,6 +28831,7 @@ "node_id": "MDU6SXNzdWU5MzAxMDIzODY=", "title": "Android Package Visibility", "body": "Include one short point max. 1-2 paragraphs about Package Visibility in https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05h-Testing-Platform-Interaction.md\r\n\r\nFirst search and confirm the section where it should belong.\r\n\r\nReference:\r\n\r\nhttps://medium.com/androiddevelopers/working-with-package-visibility-dc252829de2d", + "summary": "The issue suggests adding a brief section on Android Package Visibility to the OWASP Mobile Security Testing Guide. It indicates that the information should be integrated into the relevant section of the document linked in the issue. To address this, one should review the document to identify the appropriate section for inclusion and then draft a concise summary that highlights the importance of package visibility in Android development.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1917", @@ -27898,6 +28862,7 @@ "node_id": "MDU6SXNzdWU5NTIzNzg2MDk=", "title": "Release Process Improvements", "body": "To-Do pushed to next release:\r\n- [ ] Parse the log output in GitHub actions to detect overflows that might cause issues in the produced artefacts (PDF etc.); meaning missing line breaks (happens for example with source code)\r\n- [ ] Add contributors automatically in MSTG document build\r\n- [ ] Add captions automatically to pictures with python script from @cpholguera \r\n- [ ] Auto-generate excel checklists with Carlos python script\r\n- [ ] Clarify around donations (https://owasp.org/www-project-mobile-security-testing-guide/, OWASP https://owasp.org/www-policy/operational-historical/project-sponsorship.html) \r\n - [ ] Specify size of logo for companies that were donating for each category\r\n - [ ] Check duration of sponsorship so we can remove once expired (add date to each donator?)\r\n- [ ] Automate cover creation for Lulu.com\r\n\r\nThis is much faster: extract the first page and then convert:\r\n```\r\nfrom pdf2image import convert_from_path\r\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\r\n“”\"\r\npdf2image on macOS needs: brew install poppler\r\nSee https://github.com/Belval/pdf2image#how-to-install\r\n“”\"\r\ndef split(path, name_of_split):\r\n pdf = PdfFileReader(path)\r\n pdf_writer = PdfFileWriter()\r\n pdf_writer.addPage(pdf.getPage(0))\r\n output = f’{name_of_split}.pdf’\r\n with open(output, ‘wb’) as output_pdf:\r\n pdf_writer.write(output_pdf)\r\nif __name__ == ‘__main__‘:\r\n path = ‘OWASP_MSTG-1.2.pdf’\r\n split(path, ‘cover_v1.2’)\r\n convert_from_path(‘cover_v1.2.pdf’)[0].save(‘cover_v1.2.png’, ‘PNG’)\r\n```", + "summary": "The issue outlines several tasks aimed at improving the release process. 
Key items to address include:\n\n- Parsing log outputs in GitHub Actions to identify potential overflow issues in generated artifacts.\n- Automating the addition of contributors to the MSTG document and captions for images using a specified Python script.\n- Generating Excel checklists automatically.\n- Clarifying details regarding donations, including specifying logo sizes for donors and tracking sponsorship durations.\n- Automating the creation of covers for Lulu.com using a provided Python script.\n\nTo proceed, focus on implementing the mentioned automation and clarity enhancements in the release process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1933", @@ -27913,8 +28878,8 @@ "repository": 1127, "assignees": [], "labels": [ - 210, - 218 + 218, + 210 ] } }, @@ -27927,6 +28892,7 @@ "node_id": "I_kwDOBCbAAs47arzo", "title": "[Tool] Update Frida based testing to use frida-swift-bridge", "body": "**Platform:**\r\niOS\r\n**Description:**\r\nA test case for the swift based application using frida-swift-bridge\r\n\r\nhttps://github.com/frida/frida-swift-bridge\r\n", + "summary": "The issue discusses the need to update Frida-based testing for an iOS application to utilize the frida-swift-bridge. This involves integrating the bridge into the existing test case framework for Swift applications. The next steps should include implementing the frida-swift-bridge in the testing setup and ensuring compatibility with the current testing processes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1935", @@ -27957,6 +28923,7 @@ "node_id": "I_kwDOBCbAAs49jhqp", "title": "Centralize Crackmes", "body": "Move \r\n- https://github.com/OWASP/owasp-mstg/tree/master/Crackmes \r\n- https://github.com/OWASP/MSTG-Hacking-Playground\r\n\r\nto https://github.com/OWASP/mstg-crackmes/\r\n\r\nRelated to https://github.com/OWASP/owasp-mstg/issues/1015, #1541 ", + "summary": "The issue discusses the need to centralize Crackmes by relocating them from two specified repositories to a new repository at https://github.com/OWASP/mstg-crackmes/. This consolidation aims to improve organization and accessibility. It is also linked to previous discussions in related issues. The action required involves transferring the relevant Crackmes to the new repository.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1946", @@ -27985,6 +28952,7 @@ "node_id": "I_kwDOBCbAAs49pDZM", "title": "iOS Pasteboard transparency and Secure Paste", "body": "Shortly introduce these two topics in https://github.com/OWASP/owasp-mstg/blob/master/Document/0x06h-Testing-Platform-Interaction.md#uipasteboard\n\n\nThis avoids information leakage leaving it up to the user. Since iOS 14/15.\n\n## Pasteboard transparency\n\niOS and iPadOS notify you when an app is copying content from the pasteboard so you’re aware of the activity. Developers can allow you to paste content from another app without having access to what you’ve copied until you want them to.\n\nhttps://www.apple.com/privacy/control/\n\n## Secure Paste\n\nWith secure paste, developers can let users paste from a different app without having access to what was copied until the user takes action to paste it into their app. 
When developers use secure paste, users will be able to paste without being alerted via the pasteboard transparency notification, helping give them peace of mind.\n\nhttps://www.apple.com/newsroom/2021/06/apple-advances-its-privacy-leadership-with-ios-15-ipados-15-macos-monterey-and-watchos-8\n", + "summary": "The issue suggests enhancing the documentation on iOS Pasteboard transparency and Secure Paste in the OWASP MSTG repository. It highlights the importance of these features introduced in iOS 14/15 to prevent information leakage. Pasteboard transparency informs users when an app accesses their clipboard, while Secure Paste allows pasting content without prior access to the copied data, thus eliminating the notification alert. To address this issue, the documentation should be updated to include a brief introduction to both topics, emphasizing their roles in user privacy and security.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1949", @@ -28000,9 +28968,9 @@ "repository": 1127, "assignees": [], "labels": [ + 211, 205, - 206, - 211 + 206 ] } }, @@ -28015,6 +28983,7 @@ "node_id": "I_kwDOBCbAAs49pD5Z", "title": "[iOS Privacy] Limited Library to avoid Storage Permission", "body": "With enhanced Photos limited library access, developers can offer smart functionality — like a recent photos folder for specific albums — even when a user has only granted limited access.\n\nhttps://www.apple.com/newsroom/2021/06/apple-advances-its-privacy-leadership-with-ios-15-ipados-15-macos-monterey-and-watchos-8/\n\n\nMany apps may only need read-only access to retrieve images to share on the internet or embed in a document or email. For these purposes, the simplest way to provide an enhanced user experience is to use PHPickerViewController to access the Photos library.\n\nPHPickerViewController is a new picker that replaces UIImagePickerController. Its user interface matches that of the Photos app, supports search and multiple selection of photos and videos, and provides fluid zooming of content. Because the system manages its life cycle in a separate process, it’s private by default. The user doesn’t need to explicitly authorize your app to select photos, which results in a simpler and more streamlined user experience.\n\nhttps://developer.apple.com/documentation/photokit/delivering_an_enhanced_privacy_experience_in_your_photos_app", + "summary": "The issue discusses the implementation of limited library access in iOS, which allows developers to create features such as a recent photos folder even when users only grant limited access to their photo library. 
It highlights the use of PHPickerViewController as a preferred method for accessing photos, as it provides a user-friendly interface and operates with improved privacy, eliminating the need for explicit user authorization.\n\nTo enhance user experience while maintaining privacy, developers should consider integrating PHPickerViewController in their applications to streamline photo selection and retrieval processes.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1950", @@ -28030,9 +28999,9 @@ "repository": 1127, "assignees": [], "labels": [ + 211, 205, - 206, - 211 + 206 ] } }, @@ -28045,6 +29014,7 @@ "node_id": "I_kwDOBCbAAs4-WCck", "title": "[Tool] xpcspy", "body": "Mention xpcspy in the Dynamic Analysis section:\n\nhttps://github.com/OWASP/owasp-mstg/blob/master/Document/0x06d-Testing-Data-Storage.md#determining-whether-sensitive-data-is-exposed-via-ipc-mechanisms-mstg-storage-6\n\nhttps://github.com/hot3eed/xpcspy\n\nAdd to the Tools chapter.", + "summary": "The issue suggests incorporating xpcspy into the Dynamic Analysis section of the MSTG documentation, specifically in relation to testing data storage and the exposure of sensitive data via IPC mechanisms. It also requests the addition of xpcspy to the Tools chapter. To address this, the relevant sections of the documentation should be updated to include information about xpcspy and its applications in dynamic analysis.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1968", @@ -28073,6 +29043,7 @@ "node_id": "I_kwDOBCbAAs4-zDq6", "title": "[Android Privacy] New BLE permissions and Companion Device Pairing to avoid location permission", "body": "> Android 12 introduces the BLUETOOTH_SCAN, BLUETOOTH_ADVERTISE, and BLUETOOTH_CONNECT permissions. These permissions make it easier for apps that target Android 12 to interact with Bluetooth devices, especially for apps that don't require access to device location.\r\n>\r\n> Note: The Companion Device Manager provides a more streamlined method of connecting to companion devices. The system provides the pairing UI on behalf of your app. If you want more control over the pairing and connecting experience, use the Bluetooth permissions introduced in Android 12.\r\n>\r\n> ---\r\n>\r\n> Companion Device Manager: On devices running Android 8.0 (API level 26) and higher, companion device pairing performs a Bluetooth or Wi-Fi scan of nearby devices on behalf of your app without requiring the ACCESS_FINE_LOCATION permission. This helps maximize user privacy protections.\r\n\r\nhttps://developer.android.com/about/versions/12/features#bluetooth-permissions\r\n\r\nhttps://developer.android.com/guide/topics/connectivity/companion-device-pairing\r\n\r\nMore info regarding Android 13 (more privacy) here: https://android-developers.googleblog.com/2022/02/first-preview-android-13.html", + "summary": "The issue discusses the new Bluetooth permissions introduced in Android 12, specifically BLUETOOTH_SCAN, BLUETOOTH_ADVERTISE, and BLUETOOTH_CONNECT, which allow apps to interact with Bluetooth devices without needing location access. It highlights the benefits of using the Companion Device Manager for easier pairing and connecting to devices while ensuring user privacy, particularly on devices running Android 8.0 and above. It suggests that developers who require more control over the pairing experience might consider utilizing the new Bluetooth permissions instead. 
\n\nTo address the update, developers should review their app's permissions in light of these changes and consider implementing the Companion Device Manager for improved user privacy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1974", @@ -28103,6 +29074,7 @@ "node_id": "I_kwDOBCbAAs4_ID5H", "title": "[Tool] FlowDroid", "body": "https://github.com/secure-software-engineering/FlowDroid\n\nhttps://youtu.be/S89_V9DGtrk", + "summary": "The GitHub issue discusses a potential enhancement for the FlowDroid tool, focusing on improving its functionality or performance. The provided YouTube link may contain additional context or a demonstration related to the proposed changes. It suggests that further investigation or development is needed to implement the enhancements discussed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1983", @@ -28132,6 +29104,7 @@ "node_id": "I_kwDOBCbAAs4_ea9w", "title": "[SMS 2FA] - Enhance SMS-delivered code security with domain-bound codes", "body": "By including the URL of the intended website within the SMS, it would mean websites and apps could automatically detect and read a 2FA SMS message, inputting the data. This would certainly be more convenient than remembering and then typing the keycode in. However, more importantly, by ensuring the code would only work with a specific, intended website, the plan could eliminate the risk of falling for a scam, whereby a user might unwittingly enter their 2FA code into a phishing site.\r\n\r\n- [Enhance SMS-delivered code security with domain-bound codes](https://developer.apple.com/news/?id=z0i801mg)\r\n- [Explainer: Origin-bound one-time codes delivered via SMS](https://github.com/WICG/sms-one-time-codes)\r\n\r\n\r\n> From the [Spec by Google and Apple](https://wicg.github.io/sms-one-time-codes/):\r\n>\r\n> 4. Security considerations\r\n> \r\n> This specification attempts to mitigate the phishing risk associated with the delivery of one-time codes over SMS by enabling User Agents to know what website the one-time code is intended for.\r\n> \r\n> This specification does not attempt to mitigate other risks associated with the delivery of one-time codes over SMS, such as SMS spoofing, SIM swapping, SIM cloning, ISMI-catchers, or interception of the message by an untrusted party.\r\n> \r\n> Sites would do well to consider using non-SMS technologies such as [WEBAUTHN] for authentication or verification.\r\n> \r\n> 5. Privacy considerations\r\n> \r\n> Any party which has access to a user’s SMS messages (such as the user’s cellular carrier, mobile operating system, or anyone who intercepted the message) can learn that the user has an account on the service identified in an origin-bound one-time code message delivered over SMS.\r\n> \r\n> On some platforms, User Agents might need access to all incoming SMS messages—even messages which are not origin-bound one-time code messages—in order to support the autofilling of origin-bound one-time codes delivered over SMS in origin-bound one-time code messages.", + "summary": "The issue discusses the enhancement of SMS-delivered code security by implementing domain-bound codes, which would include the URL of the intended website in the SMS. This would allow websites and apps to automatically detect and input the 2FA code, improving user convenience and reducing the risk of phishing attacks. 
The proposal references a specification aimed at mitigating phishing risks by associating the one-time code with a specific website while acknowledging that other security risks related to SMS delivery remain unaddressed. The issue suggests that developers consider alternative, more secure authentication methods. \n\nTo move forward, it may be beneficial to evaluate the implementation of domain-bound codes and assess their potential impact on security and user experience.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/1991", @@ -28148,8 +29121,8 @@ "assignees": [], "labels": [ 202, - 207, - 211 + 211, + 207 ] } }, @@ -28162,6 +29135,7 @@ "node_id": "I_kwDOBCbAAs5AjTJ3", "title": "Use Custom Linting Rules", "body": "To help complying with our style guide we can develop custom linting rules to be executed locally and on our markdown-lint Action.\r\n\r\nSee:\n https://github.com/DavidAnson/markdownlint/blob/main/doc/CustomRules.md\n\nhttps://www.npmjs.com/package/markdownlint-rule-search-replace", + "summary": "The issue suggests developing custom linting rules to ensure adherence to the project's style guide, applicable both locally and through the markdown-lint Action. It references documentation on creating custom rules and a specific npm package that may facilitate this process. The next steps involve exploring these resources and implementing the necessary custom rules.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2012", @@ -28190,6 +29164,7 @@ "node_id": "I_kwDOBCbAAs5Ax2kW", "title": "Enhance General Mobile Security Testing chapter", "body": "We should enhance the chapter with clear guidelines on setting up the scope and expectations with the client. This way we can refer back to a section here whenever a test might get too complex and time consuming.\r\n\r\n\r\nhttps://github.com/OWASP/owasp-mstg/blob/master/Document/0x04b-Mobile-App-Security-Testing.md#coordinating-with-the-client\r\n\r\nThis idea came after discussing https://github.com/OWASP/owasp-mstg/pull/1988", + "summary": "The issue suggests enhancing the General Mobile Security Testing chapter by adding clear guidelines on establishing the scope and expectations with clients. This addition aims to create a reference point for situations where testing may become overly complex or time-consuming. It would be beneficial to update the relevant section in the document to improve clarity and usability for testers.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2014", @@ -28216,6 +29191,7 @@ "node_id": "I_kwDOBCbAAs5A5LbO", "title": "[Android] Add Windows Subsystem for Android (WSA)", "body": "https://sensepost.com/blog/2021/android-application-testing-using-windows-11-and-windows-subsystem-for-android/", + "summary": "The issue discusses the need to integrate Windows Subsystem for Android (WSA) into the project, highlighting its potential for enhancing Android application testing on Windows 11. The request emphasizes the importance of supporting this subsystem to leverage its capabilities for developers. 
It may be beneficial to explore the provided link for insights on implementing this feature effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2019", @@ -28232,8 +29208,8 @@ "assignees": [], "labels": [ 202, - 212, - 219 + 219, + 212 ] } }, @@ -28246,6 +29222,7 @@ "node_id": "I_kwDOBCbAAs5Cfdab", "title": "Android Nearby Connections API", "body": "Nearby Connections is a peer-to-peer networking API that allows apps to easily discover, connect to, and exchange data with nearby devices in real-time, regardless of network connectivity.\n\n> We don’t recommend sending messages back and forth using the Nearby server (even though it’s possible).\nFirst, the messages are not encrypted and you shouldn’t publish sensitive information through the Nearby Server. Instead, once you verify close physical proximity through the Nearby APIs, you can transition to something like Firebase Cloud Messaging (FCM) to continue the communication.\n\nhttps://medium.com/androiddevelopers/better-physical-stories-with-googles-nearby-apis-280be707bbf9\n\n\n> Caution: While authentication is optional, connections established without authentication are insecure, and can expose devices to severe security vulnerabilities. To avoid this, always use authentication to secure your connections.\n\nhttps://developers.google.com/nearby/connections/android/manage-connections\n\n\n> [•••] connection requests are delivered to ConnectionLifecycleCallback#onConnectionInitiated(). [•••] The connection shown in the preceding snippet is accepted automatically, but you should take further precautions when sharing sensitive data.\nThe API provides a unique token per connection through the ConnectionInfo object of onConnectionInitiated(String endpointId, ConnectionInfo info). You may obtain the token from info.getAuthenticationDigits().\nWe recommend displaying the tokens to both users, so they can visually confirm they are equal. If visual confirmation isn’t feasible because the users cannot see each other’s screens, you can have one device encrypt the raw token, send it to the other device for decryption and comparison before you start sharing the sensitive content.\n> [•••] While the two devices are connected, they can send content to each other through the sendPayload() method call and the onPayloadReceived() callback.\nA Payload can be anything your users want to share: streams, files, music, videos, photos, text, etc.\n\nhttps://medium.com/androiddevelopers/two-way-communication-without-internet-nearby-connections-b118530cb84d\n\n", + "summary": "The issue discusses the use of the Android Nearby Connections API for peer-to-peer networking, emphasizing best practices for secure communication between devices. It advises against using the Nearby server for message exchanges due to security concerns, suggesting a shift to Firebase Cloud Messaging (FCM) for ongoing communication after establishing proximity. The issue highlights the importance of authentication to prevent vulnerabilities and recommends visually confirming connection tokens between users or using encryption methods for sensitive data sharing. Additionally, it notes that devices can exchange various types of payloads once connected. 
\n\nTo enhance security, implement authentication and consider using encryption for token verification.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2044", @@ -28277,6 +29254,7 @@ "node_id": "I_kwDOBCbAAs5C5gvH", "title": "[Android] Improve Activity Testing to include implicit Intent attacks ", "body": "Chapter **0x04h-Testing-Code-Quality, Testing for Injection Flaws (MSTG-PLATFORM-2)** could provide a better background to intent hijacking and spoofing (see e.g https://onlinelibrary.wiley.com/doi/abs/10.1002/spe.2643).\r\n\r\nWhat's missing in the MSTG is testing for a tech I read about [here](https://blog.oversecured.com/Interception-of-Android-implicit-intents/). Implicit intents are rarely discovered by automated tools but can contain some critical flaws. For example in the provided blog post an implicit intent is used to call a file picker dialog. The returned file path is not properly checked and an attacker could provide a malicious app that responds to the implicit intent with arbitrary file uri's.\r\n\r\n**ENHANCEMENT:**\r\n\r\n(originally in #1962)\r\n\r\n2. OWASP section related to implicit intents doesn't say about `startActivityForResult(...)` that can return values via `setResult(...)`. If content pickers are used (such as `android.intent.action.GET_CONTENT` or `android.intent.action.PICK`), it leads to theft and/or overwrite of arbitrary files (in the first case, if the URI's content is saved to a public directory; in the second case if the attacker can control output path via, e.g. `Uri.getLastPathSegments()` or `_display_name` when a vulnerable app queries it from the attacker-controlled content provider)\r\n\r\n3. Activities that handle `android.intent.action.SEND` accept a Uri to share. They cache the contents somewhere and then process it as a file. The attacks are the same as in the previous item.", + "summary": "The issue highlights the need for improvements in the testing of Android activities, specifically regarding implicit Intent attacks, which are often overlooked by automated tools. It suggests that the current guidelines in the MSTG do not adequately address risks associated with implicit intents, such as intent hijacking and the potential for arbitrary file access through compromised content providers. The enhancement proposal includes updating the OWASP section to cover vulnerabilities related to `startActivityForResult(...)` and the handling of `android.intent.action.SEND`, which may lead to file theft or overwriting due to improper URI management. \n\nTo address these concerns, it is recommended to incorporate more comprehensive testing strategies and documentation regarding implicit intents and their associated risks in the next iteration of the guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2052", @@ -28306,6 +29284,7 @@ "node_id": "I_kwDOBCbAAs5DeFZM", "title": "[Android Privacy] Photo Picker to avoid Storage Permission (Android 13)", "body": "Add to [\"Testing App Permissions (MSTG-PLATFORM-1) > Permission Analysis AND Dynamic Analysis\"](https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05h-Testing-Platform-Interaction.md#permission-analysis)\r\n\r\n**Do after #1993!!**\r\n\r\n> To help protect photo and video privacy of users, Android 13 adds a system photo picker — a standard and optimized way for users to share both local and cloud-based photos securely. 
Android’s long standing document picker allows a user to share specific documents of any type with an app, without that app needing permission to view all media files on the device. The photo picker extends this capability with a dedicated experience for picking photos and videos. **Apps can use the [photo picker APIs](https://developer.android.com/about/versions/13/features/photopicker) to access the shared photos and videos without needing permission to view all media files on the device**.\r\n\r\n```kotlin\r\nval intent = Intent(MediaStore.ACTION_PICK_IMAGES)\r\nintent.putExtra(MediaStore.EXTRA_PICK_IMAGES_MAX, maxNumPhotosAndVideos) // optional, e.g. val maxNumPhotosAndVideos = 10\r\n```\r\n\r\n> Note: Upcoming Google Play system updates are expected to include new features related to the photo picker. In one such update, the library will add support for apps that target Android 11 (API level 30) or higher (excluding Android Go devices).\r\n\r\nMore info regarding Android 13 here: https://android-developers.googleblog.com/2022/02/first-preview-android-13.html\r\n\r\nRelated (iOS counterpart): #1950 ", + "summary": "The issue discusses the implementation of the new system photo picker introduced in Android 13, which allows apps to access shared photos and videos without requiring permission to view all media files on the device. This feature aims to enhance user privacy for photo and video sharing. The request is to add this information to the relevant section of the Testing App Permissions documentation, specifically under Permission Analysis and Dynamic Analysis.\n\nIt is noted that this task should be completed after addressing another related issue (#1993). The issue includes a sample code snippet for using the photo picker APIs and references upcoming Google Play updates that will support this feature for apps targeting Android 11 and higher. Additionally, there's a mention of a related issue for iOS.\n\nTo proceed, one should incorporate the details of the Android 13 photo picker into the specified documentation section while ensuring the dependency on issue #1993 is respected.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2062", @@ -28332,6 +29311,7 @@ "node_id": "I_kwDOBCbAAs5DyyWz", "title": "[Test Case] Dynamic Pinning", "body": "Test for dynamic pinning.\r\n\r\n> (From wultra/ssl-pinning-ios)\r\n> The SSL pinning (or [public key, or certificate pinning](https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning)) is a technique mitigating [Man-in-the-middle attacks](https://en.wikipedia.org/wiki/Man-in-the-middle_attack) against the secure HTTP communication. The typical iOS solution is to bundle the hash of the certificate, or the exact data of the certificate to the application and validate the incoming challenge in the URLSessionDelegate. This in general works well, but it has, unfortunately, one major drawback of the certificate's expiration date. The certificate expiration forces you to update your application regularly before the certificate expires, but still, some percentage of the users don't update their apps automatically. 
So, the users on the older version, will not be able to contact the application servers.\r\n> \r\n> The solution to this problem is the dynamic SSL pinning, where the list of certificate fingerprints are securely downloaded from the remote server.\r\n\r\nSee:\r\nhttps://github.com/wultra/ssl-pinning-android\r\nhttps://github.com/wultra/ssl-pinning-ios\r\n\r\n", + "summary": "The issue discusses the need for a test case focused on dynamic pinning for SSL, addressing the challenges posed by certificate expiration in standard SSL pinning methods. Dynamic SSL pinning aims to resolve the issue of users being unable to access application servers due to outdated certificates by securely downloading a list of certificate fingerprints from a remote server. To move forward, a test case should be developed that validates this dynamic pinning approach.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2063", @@ -28362,6 +29342,7 @@ "node_id": "I_kwDOBCbAAs5GSU84", "title": "[Tool] medusa", "body": "Add medusa along with objection, etc.?\r\n\r\nhttps://github.com/Ch0pin/medusa\r\n\r\nSee it in action: \r\n- Testing WebViews: https://valsamaras.medium.com/when-equal-is-not-another-webview-takeover-story-730be8d6e202\r\n- Testing JNI: https://valsamaras.medium.com/tracing-jni-functions-75b04bee7c58\r\n- Cracking Android Uncrackable Level 2: https://valsamaras.medium.com/just-another-cracking-the-uncrackable-2266cbb61680\n\n\nSoon expanding to iOS:\n\nhttps://twitter.com/Ch0pin/status/1717101408019718388?t=gwHdE-jW5goKlI1oAwDvjw&s=19", + "summary": "The issue suggests adding Medusa alongside other tools like Objection. It provides links to various resources showcasing Medusa's capabilities, including testing WebViews, JNI, and cracking Android applications. Additionally, there is a mention of plans to expand support to iOS. To move forward, it may be necessary to integrate Medusa into the existing framework and consider its compatibility with iOS.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2091", @@ -28392,6 +29373,7 @@ "node_id": "I_kwDOBCbAAs5I8irK", "title": "Testing iOS App Clips", "body": "Research & try out App Clips, add a section about its privacy and security implications and add tests if needed.\r\n\r\n> An App Clip is a lightweight version of your app that offers some of its functionality when and where it’s needed. It offers a focused feature set and is designed to launch instantly, protect user privacy, and preserve resources. As a result, an App Clip comes with some limitations.\r\n\r\nSee privacy implications in [Preserve User Privacy](https://developer.apple.com/documentation/app_clips/choosing_the_right_functionality_for_your_app_clip).\r\n\r\nMore:\r\n- https://developer.apple.com/documentation/app_clips/sharing_data_between_your_app_clip_and_your_full_app\r\n- https://developer.apple.com/documentation/authenticationservices/implementing_user_authentication_with_sign_in_with_apple", + "summary": "The issue involves testing iOS App Clips and requires researching their functionality while focusing on privacy and security implications. It suggests adding a dedicated section that addresses these concerns and recommends implementing tests if necessary. 
To proceed, one should review the provided documentation on user privacy and data sharing between App Clips and full apps.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2104", @@ -28421,6 +29403,7 @@ "node_id": "I_kwDOBCbAAs5JAg48", "title": "[MSTG-PLATFORM-10] Additional option to prevent caching", "body": "### MSTG Chapter\n\n0x06d-testing-data-storage\n\n### File Line Number\n\n448\n\n### Context\n\nI came across this article: https://blog.silentsignal.eu/2016/05/06/ios-http-cache-analysis-for-abusing-apis-and-forensics/ which talked about setting the Cache-Control header to prevent caching on iOS devices among other things. I tested it out on iOS 13.4.1 with modified responses containing `Cache-Control: no-cache, no-store`. The WebKit folders and application specific folder containing Cache.db were not created.", + "summary": "The issue discusses the need for an additional option to prevent caching in the context of the MSTG Chapter on testing data storage. The author references an article that highlights the importance of setting the Cache-Control header to avoid caching on iOS devices. Testing on iOS 13.4.1 with a modified response containing `Cache-Control: no-cache, no-store` resulted in the absence of WebKit folders and the application-specific Cache.db file. \n\nTo address this, it may be necessary to implement or document additional methods for effectively preventing caching on iOS devices.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2106", @@ -28449,6 +29432,7 @@ "node_id": "I_kwDOBCbAAs5LRXcU", "title": "iOS Developer Mode", "body": "Research and analyze security impact:\r\n\r\nhttps://developer.apple.com/documentation/xcode/enabling-developer-mode-on-a-device\r\n\r\nAt least mention it in Fundamentals / Basic Security testing (0x06b).", + "summary": "The issue discusses the need to research and analyze the security implications of enabling Developer Mode on iOS devices, as outlined in Apple's documentation. It suggests that this topic should be addressed in the Fundamentals section, specifically under Basic Security Testing (0x06b). To address this issue, the team should investigate the potential security risks associated with Developer Mode and incorporate relevant findings into the specified section.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2109", @@ -28480,6 +29464,7 @@ "node_id": "I_kwDOBCbAAs5LRu7I", "title": "Add References to Frida Book", "body": "Add references to https://learnfrida.info/ which extends the MSTG Dynamic Binary Instrumentation (DBI) testing techniques.\r\n\r\nIn Document/0x08-Testing-Tools.md, create a section right before \"#### Frida for Android\" and call it \"#### Frida Handbook\". It should have the following content:\r\n\r\n- very short intro about the book.\r\n- short listing about what you can learn in the book (from the high-level topics in https://learnfrida.info/)\r\n\r\nApart from that, we can include links to some parts of the book, for example:\r\n- In Document/0x08-Testing-Tools.md (around line 64) include https://learnfrida.info/advanced_usage/#stalker\r\n- In Document/0x08-Testing-Tools.md in the \"r2frida\" section put a link to https://learnfrida.info/r2frida/", + "summary": "The issue suggests adding a new section titled \"#### Frida Handbook\" to the Document/0x08-Testing-Tools.md file, located just before the \"#### Frida for Android\" section. 
This new section should include a brief introduction to the Frida book and a summary of the key learning topics available on the website https://learnfrida.info/. Additionally, the issue recommends including specific links within the document, such as linking to the advanced usage of Stalker and the r2frida section on the Frida website. This enhancement aims to provide users with more resources and insights related to Frida and its applications in dynamic binary instrumentation testing.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2110", @@ -28512,6 +29497,7 @@ "node_id": "I_kwDOBCbAAs5Lh03Z", "title": "Update 0x05j with Play Integrity", "body": "Google will discontinue SafetyNet API in favor of Play Integrity API\r\n\r\nhttps://developer.android.com/google/play/integrity/migrate\r\n\r\nWe should update the SafetyNet section accordingly.", + "summary": "The issue highlights the need to update the 0x05j section to replace the deprecated SafetyNet API with the Play Integrity API, as Google is phasing out SafetyNet. The relevant migration information can be found on the provided link. It is suggested to revise the documentation to reflect this change.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2113", @@ -28527,8 +29513,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, - 209 + 209, + 202 ] } }, @@ -28541,6 +29527,7 @@ "node_id": "I_kwDOBCbAAs5NCBrC", "title": "Enhance iOS Platform Overview Chapter", "body": "@vadimszzz has written a nice document including an overview of the iOS platform which is located in the following section:\n\nhttps://github.com/vadimszzz/iOS-Internals-and-Security-Testing#ios-platform-overview\n\nWe can use this to enhance our current chapter.\n\nhttps://github.com/OWASP/owasp-mstg/blob/master/Document/0x06a-Platform-Overview.md\n\nPlease keep in mind that we'd like to keep the content as short and comprehensive as possible. Some parts from the \"iOS Internals & Security Testing\" document might be a perfect fit for the MSTG and others might be out of scope. Please use the current MSTG chapter outline as a guide.", + "summary": "The issue discusses the need to enhance the iOS Platform Overview chapter in the MSTG documentation by incorporating relevant content from an existing document that provides an overview of the iOS platform. The goal is to create a more concise and comprehensive chapter while ensuring that the content aligns with the current MSTG chapter outline. It is suggested to selectively integrate appropriate sections from the referenced document while maintaining focus on relevance and brevity.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2135", @@ -28572,6 +29559,7 @@ "node_id": "I_kwDOBCbAAs5NHuR4", "title": "[0x06j] Upgrade Test Case: Jailbreak Detection (MSTG-RESILIENCE-1)", "body": "Upgrade section \"Bypassing Jailbreak Detection\" in https://github.com/OWASP/owasp-mstg/blob/master/Document/0x06j-Testing-Resiliency-Against-Reverse-Engineering.md#jailbreak-detection-mstg-resilience-1\r\n\r\nTODO:\r\n\r\n- simplify\r\n- remove hopper (and images), use radare2 instead\r\n- remove cycript, use frida instead\r\n- or simply reduce all to objection\r\n\r\n", + "summary": "The issue discusses the need to upgrade the \"Bypassing Jailbreak Detection\" section in the MSTG documentation. 
Key tasks include simplifying the content, replacing Hopper and its associated images with radare2, substituting cycript with frida, or consolidating the methods to just use objection.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2141", @@ -28602,6 +29590,7 @@ "node_id": "I_kwDOBCbAAs5NuQhG", "title": "Add Hide Sensitive Content from Clipboard\n\nMSTG-STORAGE-7", "body": "https://developer.android.com/about/versions/13/behavior-changes-all#copy-sensitive-content", + "summary": "The issue discusses the need to implement a feature that hides sensitive content from being copied to the clipboard, in line with the behavior changes introduced in Android 13. This enhancement aims to enhance user privacy by preventing sensitive information from being inadvertently exposed through the clipboard. The task involves evaluating the current clipboard handling methods and ensuring that sensitive data is appropriately masked or restricted when copied.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2159", @@ -28617,10 +29606,10 @@ "repository": 1127, "assignees": [], "labels": [ + 208, 202, 205, - 206, - 208 + 206 ] } }, @@ -28633,6 +29622,7 @@ "node_id": "I_kwDOBCbAAs5QfnuS", "title": "Using Google Paranoid to Test for Bad Crypto", "body": "1. Demo/little writeup how we could use this to test for crypto in Android and iOS apps. \r\n\r\nhttps://github.com/google/paranoid_crypto\r\n\r\n2. Propose a list MASTG test cases to enhance with this.\r\n3. Enhance the MASTG test case(s).", + "summary": "The issue discusses using Google Paranoid for testing cryptographic implementations in Android and iOS applications. It suggests creating a demonstration or a brief write-up detailing how to apply this tool effectively for testing. Additionally, there is a proposal to compile a list of MASTG (Mobile Application Security Testing Guide) test cases that could be improved with the integration of Google Paranoid. The final goal is to enhance the identified MASTG test case(s) to improve their effectiveness in cryptographic testing. \n\nTo move forward, consider creating the demo/write-up and compiling the proposed MASTG test cases for enhancement.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2184", @@ -28648,8 +29638,8 @@ "repository": 1127, "assignees": [], "labels": [ - 212, - 217 + 217, + 212 ] } }, @@ -28662,6 +29652,7 @@ "node_id": "I_kwDOBCbAAs5RMbAB", "title": "[Bug] Swift language not supported by pandoc", "body": "### MASTG Chapter\n\n0x06j\n\n### File Line Number\n\nvarious\n\n### Context\n\nPandoc doesn't support swift with the current version, so code blocks have been reverted to ```default. This ticket is a reminder to revert those changes when pandoc supports swift.", + "summary": "The issue notes that the current version of Pandoc does not support the Swift programming language, resulting in code blocks being set to the default format. The ticket serves as a reminder to revert these changes once Pandoc adds support for Swift. 
To resolve the issue, monitoring updates from Pandoc regarding Swift support is necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2230", @@ -28688,6 +29679,7 @@ "node_id": "I_kwDOBCbAAs5UEvjC", "title": "Add Passkeys ", "body": "Add theory for passkeys to https://github.com/OWASP/owasp-mastg/blob/master/Document/0x04e-Testing-Authentication-and-Session-Management.md\r\n\r\n- https://android-developers.googleblog.com/2022/10/bringing-passkeys-to-android-and-chrome.html\r\n\r\n- https://developer.apple.com/passkeys/\r\n\r\nAnswer in this ticket:\r\n\r\n- what are the risks that an app is protecting against when using passkeys? Maybe this can become a recommended mitigation for those risks. Right now in the proposal we have [one test (Passwordless Authentication Not Implemented)](https://docs.google.com/spreadsheets/d/1Go5GpVvKJqTDxGbSLBPZb1hmYi5lXRc1D1AfrTTkUkY/edit#gid=706661277&range=C43) for this.\r\n- what are the potential risks when using passkeys. What are the tests we could write to address those risks?\r\n", + "summary": "The issue proposes adding a section on passkeys to the OWASP Mastg documentation, specifically focusing on their implementation in authentication and session management. It suggests including a discussion on the risks that applications are protecting against when using passkeys, as well as identifying potential risks associated with their use. The issue highlights the need for tests that could help address these risks, beyond the existing test for passwordless authentication. \n\nTo move forward, it would be beneficial to outline the specific risks and develop corresponding test cases that assess the security of passkeys in applications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2283", @@ -28702,8 +29694,8 @@ "author": 474, "repository": 1127, "assignees": [ - 476, - 496 + 496, + 476 ], "labels": [ 207 @@ -28719,6 +29711,7 @@ "node_id": "I_kwDOBCbAAs5UGtNg", "title": "Check for Pending Intents", "body": "Add theory about \"Pending Intents\" to 0x5h.\r\n\r\nAdd test case to check for vulnerable pending intents.\r\n\r\n\r\n> Originally suggested in https://github.com/OWASP/owasp-mastg/pull/1962#issuecomment-956202664\r\n> look for https://www.blackhat.com/eu-21/briefings/schedule/index.html#re-route-your-intent-for-privilege-escalation-an-universal-way-to-exploit-android-pendingintents-in-high-profile-and-system-apps-24340. They will report a new vector on how to get access to providers via PendingIntents (a lot of apps are vulnerable to that right now!)", + "summary": "The issue discusses the need to enhance the documentation related to \"Pending Intents\" in the 0x5h section. It suggests adding theoretical information and creating a test case to identify vulnerable pending intents. The request is backed by a reference to a presentation that highlights a new exploitation vector for accessing providers via Pending Intents, indicating a prevalent vulnerability in many applications. 
To address this, the documentation and testing framework should be updated accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2285", @@ -28748,6 +29741,7 @@ "node_id": "I_kwDOBCbAAs5XeOB9", "title": "[Bug] Device Monitor is Deprecated", "body": "### MASTG Component\r\n\r\nhttps://mas.owasp.org/MASTG/techniques/android/MASTG-TECH-0032/\r\n\r\n### Context\r\n\r\nThe \"Android Device Monitor\" is deprecated and has to be updated with the new tooling. Here's the table with the alternatives: https://developer.android.com/studio/profile/monitor\r\n\r\nThanks @Laancelot ", + "summary": "The issue reports that the \"Android Device Monitor\" is deprecated and needs to be updated with newer tooling alternatives. A reference table outlining these alternatives is provided in the issue. The necessary action involves reviewing the current implementation and transitioning to the recommended tools listed in the table.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2329", @@ -28763,9 +29757,9 @@ "repository": 1127, "assignees": [], "labels": [ + 216, 202, - 204, - 216 + 204 ] } }, @@ -28778,6 +29772,7 @@ "node_id": "I_kwDOBCbAAs5bekKB", "title": "Testing Preload (system) Apps ", "body": "Add how to instrument system applications on Android stock images to https://mas.owasp.org/MASTG/Android/0x05c-Reverse-Engineering-and-Tampering/\n\nMaybe link to this paper https://arxiv.org/abs/2302.01890 (to be confirmed)\r\n\r\nReference: https://blog.talosintelligence.com/how-to-instrument-system-applications-on-android-stock-images/\r\n", + "summary": "The issue discusses the need to update the OWASP MASTG documentation by including instructions on how to instrument system applications on Android stock images. It suggests linking to a relevant research paper for further reference and provides a resource from Talos Intelligence for additional context. The task involves integrating this information into the existing documentation on reverse engineering and tampering.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2344", @@ -28793,8 +29788,8 @@ "repository": 1127, "assignees": [], "labels": [ - 202, 209, + 202, 214 ] } @@ -28808,6 +29803,7 @@ "node_id": "I_kwDOBCbAAs5eL6e7", "title": "Android 14 Updates ", "body": "Review and apply to theory and future v2 tests.\n\nhttps://developer.android.com/about/versions/14/behavior-changes-all\n\nhttps://developer.android.com/about/versions/14/behavior-changes-14\n\nSee https://arstechnica.com/gadgets/2023/02/android-14-preview-1-is-out-will-officially-ban-installation-of-old-apps/\n\n", + "summary": "The issue discusses the need to review and implement updates related to Android 14 in the context of theory and future version 2 tests. It includes links to relevant resources detailing behavior changes and updates for Android 14. The next steps involve analyzing these changes and integrating them into the testing framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2370", @@ -28834,6 +29830,7 @@ "node_id": "I_kwDOBCbAAs5eoi43", "title": "[Tool] Add frida-android-helper", "body": "Pretty nice tool to quickly do stuff from the commandline:\r\n\r\nhttps://github.com/Hamz-a/frida-android-helper", + "summary": "The issue suggests adding the tool \"frida-android-helper,\" which facilitates quick command-line operations. It includes a link to the tool's GitHub repository for reference. 
To address this issue, consideration should be given to integrating the tool into the existing project or documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2374", @@ -28860,6 +29857,7 @@ "node_id": "I_kwDOBCbAAs5eoj1y", "title": "[Tool] patch-apk", "body": "A tool to easily pull packages from the device, repackage them with frida-server, update NSC, sign and install it again. The original project has been archived, but this fork of a fork has some good features. It's possible that a better form exists.\r\n\r\nThe main advantage of this one is that it supports pulling split-apks .\r\n\r\nhttps://github.com/jseigelis/patch-apk\r\n\r\n**TODO:** Add to \"Tools\" as a dedicated file and reference it from the relevant Technique.\r\n", + "summary": "A GitHub issue discusses a tool called \"patch-apk,\" which facilitates pulling packages from a device, repackaging them with frida-server, updating NSC, and reinstalling them. The original project has been archived, but this fork offers valuable features, particularly the ability to pull split-apks. It suggests that there may be superior alternatives available. \n\nThe key action item is to add this tool to the \"Tools\" section as a dedicated file and reference it in the relevant technique documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2375", @@ -28886,6 +29884,7 @@ "node_id": "I_kwDOBCbAAs5eozvG", "title": "[Bug] Xamarin ", "body": "### MASTG Chapter\n\n0x05g-Testing-Network-Communication.md\n\n### File Line Number\n\n399, might have changed... :-) \n\n### Context\n\nWe should find all instances of 3rd party frameworks like Xamarin and summarise them in another chapter, as we are focusing on native apps ", + "summary": "A bug has been identified regarding the MASTG Chapter 0x05g-Testing-Network-Communication.md, specifically around line 399. The issue highlights the need to locate all mentions of 3rd party frameworks, such as Xamarin, and compile them into a separate chapter to align with the focus on native applications. The next steps involve reviewing the document for these instances and organizing the information accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2376", @@ -28912,6 +29911,7 @@ "node_id": "I_kwDOBCbAAs5mgICN", "title": "[Tool] Add jtool2", "body": "We should add jtool2 to `tools/ios`, and add it in various test cases where it can easily help us.\r\n\r\nFor example, extracting the entitlements from an iOS binary currently uses r2 and binwalk, while jtool2 has `jtool2 --ent `", + "summary": "The issue suggests adding jtool2 to the `tools/ios` directory and incorporating it into several test cases to enhance functionality. Specifically, it points out that jtool2 can simplify the process of extracting entitlements from an iOS binary, which is currently done using r2 and binwalk. 
The next steps would involve integrating jtool2 into the relevant test cases and documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2418", @@ -28940,6 +29940,7 @@ "node_id": "I_kwDOBCbAAs5s77Gx", "title": "Add Android Credential Manager with passkeys", "body": "https://android-developers.googleblog.com/2023/07/credential-manager-beta-easy-secure-authentication-with-passkeys-on-android.html?m=1\n\n> Credential Manager is a new Jetpack library that allows app developers to simplify their users' authentication journey, while also increasing security with support of passkeys.\n> ...\n> Credential Manager brings support for passkeys, a new passwordless authentication mechanism, together with traditional sign-in methods, such as passwords and federated sign-in, into a single interface for the user and a unified API for developers.", + "summary": "The issue proposes the integration of the Android Credential Manager, a new Jetpack library that enhances user authentication by supporting passkeys alongside traditional sign-in methods. This feature aims to streamline the authentication process and improve security for app users. The task involves implementing this library to provide a unified interface for users and a cohesive API for developers.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2428", @@ -28969,6 +29970,7 @@ "node_id": "I_kwDOBCbAAs5s77rK", "title": "[Tool] Hetty for MITM?", "body": "https://github.com/dstotijn/hetty\n\nHetty is an HTTP toolkit for security research. It aims to become an open source alternative to commercial software like Burp Suite Pro, with powerful features tailored to the needs of the infosec and bug bounty community.\n\n\nWarning ⚠️: last release 1 Year ago (March 29, 2022)\n\nAt least we have it here as a reference.", + "summary": "The issue discusses Hetty, an HTTP toolkit intended for security research and an open-source alternative to commercial tools like Burp Suite Pro. It highlights that the last release was a year ago, prompting concern about its current status and future updates. The community may need to consider contributing to or enhancing the tool to ensure its ongoing development and relevance.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2429", @@ -28998,6 +30000,7 @@ "node_id": "I_kwDOBCbAAs5x98W0", "title": "[Tool] Add BinDiff by Google", "body": "\n\nhttps://github.com/google/bindiff", + "summary": "The issue discusses the integration of BinDiff, a tool developed by Google for binary diffing, into the project. It highlights the potential benefits of using BinDiff for analyzing binary files and suggests exploring the necessary steps for its implementation. To move forward, the team should evaluate the dependencies and compatibility of BinDiff with the current project setup.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2441", @@ -29028,6 +30031,7 @@ "node_id": "I_kwDOBCbAAs5yCQfP", "title": "[Technique] Update Android Traffic Interception (Android 14)", "body": "https://httptoolkit.com/blog/android-14-install-system-ca-certificate/", + "summary": "The issue discusses the need to update the Android Traffic Interception technique to accommodate changes introduced in Android 14. There are specific steps outlined in the provided link regarding installing a system CA certificate that are necessary for proper interception functionality. 
To resolve the issue, it would be beneficial to implement the updated methods for certificate installation and ensure compatibility with the latest Android version.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2442", @@ -29056,6 +30060,7 @@ "node_id": "I_kwDOBCbAAs5zBHEx", "title": "OWASP_MAS_Checklist excel broken", "body": "Is it just me or do the FAIL/PASS statuses change random cell block colours on the excel check list sheet? \r\n\r\nRELATES TO: https://github.com/OWASP/owasp-mastg/issues/2415", + "summary": "The issue involves unexpected behavior in the OWASP_MAS_Checklist Excel file, where the FAIL/PASS statuses are causing random cell block colors to change. To resolve this, it may be necessary to investigate the formatting or conditional formatting settings within the Excel sheet to ensure consistent behavior.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2454", @@ -29082,6 +30087,7 @@ "node_id": "I_kwDOBCbAAs58coFq", "title": "[Tool] gplaycli", "body": "### MASTG Chapter\n\nMASTG-TECH-0003.md\n\n### File Line Number\n\n22\n\n### Context\n\nthe tool doesn't seems to work and stackoverflow suggested using https://github.com/3052/google ", + "summary": "The issue notes that the tool gplaycli is not functioning properly. A suggestion from Stack Overflow recommends using an alternative tool found at a specific GitHub repository. It may be helpful to investigate the functionality of the suggested alternative to resolve the issue with gplaycli.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2512", @@ -29110,6 +30116,7 @@ "node_id": "I_kwDOBCbAAs5-Sl5b", "title": "[MASWE-0002] Sensitive Data Stored With Insufficient Access Restrictions in Internal Locations", "body": "## Description\r\n\r\nCreate a new risk for \"Sensitive Data Stored With Insufficient Access Restrictions in Internal Locations (MASVS-STORAGE-2)\" using the following information:\r\n\r\nSensitive data may be stored in internal locations without ensuring exclusive app access (e.g. 
by using the wrong file permissions) and may be accessible to other apps.\r\n\r\n\r\n\r\nCreate \"`risks/MASVS-STORAGE/2-***-****/data-insufficient-access-restrictions-internal/risk.md`\" including the following content:\r\n\r\n```yaml\r\n---\r\ntitle: Sensitive Data Stored With Insufficient Access Restrictions in Internal Locations\r\nalias: data-insufficient-access-restrictions-internal\r\nplatform: [android]\r\nprofiles: [L1, L2]\r\nmappings:\r\n masvs-v1: [MSTG-STORAGE-2]\r\n masvs-v2: [MASVS-STORAGE-2]\r\n mastg-v1: [MASTG-TEST-0052, MASTG-TEST-0001]\r\n\r\n---\r\n\r\n## Overview\r\n\r\n## Impact\r\n\r\n## Modes of Introduction\r\n\r\n## Mitigations\r\n\r\n```\r\n\r\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\r\n\r\nUse at least the following references:\r\n\r\n- \r\n\r\nWhen creating the corresponding tests, use the following areas to guide you:\r\n\r\n- File permissions (Android)\r\n\r\n**MASTG v1 Refactoring:**\r\n\r\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\r\n\r\n- [MASTG-TEST-0052 - Testing Local Data Storage (ios)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0052/)\r\n- [MASTG-TEST-0001 - Testing Local Storage for Sensitive Data (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0001/)\r\n\r\n## Acceptance Criteria\r\n\r\n- [ ] The risk has been created in the correct directory (`risks/MASVS-STORAGE/2-***-****/data-insufficient-access-restrictions-internal/risk.md`)\r\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\r\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\r\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\r\n", + "summary": "A new risk related to \"Sensitive Data Stored With Insufficient Access Restrictions in Internal Locations\" needs to be created under the MASVS-STORAGE category. The issue highlights the potential for sensitive data to be improperly secured due to incorrect file permissions, allowing access from other applications.\n\nThe task involves creating a markdown file at the specified directory path and populating it with defined sections such as Overview, Impact, Modes of Introduction, and Mitigations, following provided guidelines. Additionally, references and test guidance about file permissions on Android are included. \n\nAcceptance criteria specify that the risk must be correctly placed, formatted according to guidelines, and linked to relevant MASTG tests. A GitHub issue for corresponding tests should also be created.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2540", @@ -29141,6 +30148,7 @@ "node_id": "I_kwDOBCbAAs5-Sl6A", "title": "[MASWE-0003] Backup Unencrypted", "body": "> This prev. 
open PR could contain useful inputs: https://github.com/OWASP/owasp-mastg/pull/2604/files#diff-a6472df266173afc665035280a844525ce81374d2b343070dfd37a24deffa541\r\n\r\n## Description\r\n\r\nCreate a new risk for \"Backup Unencrypted (MASVS-STORAGE-2)\" using the following information:\r\n\r\nThe app may not encrypt sensitive data in backups, which may compromise data confidentiality.\r\n\r\n\r\n\r\nCreate \"`risks/MASVS-STORAGE/2-***-****/backup-unencrypted/risk.md`\" including the following content:\r\n\r\n```yaml\r\n---\r\ntitle: Backup Unencrypted\r\nalias: backup-unencrypted\r\nplatform: [android]\r\nprofiles: [L2]\r\nmappings:\r\n masvs-v1: [MSTG-STORAGE-8]\r\n masvs-v2: [MASVS-STORAGE-2, MASVS-PRIVACY-1]\r\n mastg-v1: [MASTG-TEST-0058, MASTG-TEST-0009]\r\n\r\n---\r\n\r\n## Overview\r\n\r\n## Impact\r\n\r\n## Modes of Introduction\r\n\r\n## Mitigations\r\n\r\n```\r\n\r\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\r\n\r\nUse at least the following references:\r\n\r\n- \r\n\r\nWhen creating the corresponding tests, use the following areas to guide you:\r\n\r\n- Backup Device Conditions clientSideEncryption and deviceToDeviceTransfer Not Checked (Android)\r\n\r\n**MASTG v1 Refactoring:**\r\n\r\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\r\n\r\n- [MASTG-TEST-0058 - Testing Backups for Sensitive Data (ios)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0058/)\r\n- [MASTG-TEST-0009 - Testing Backups for Sensitive Data (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/)\r\n\r\n## Acceptance Criteria\r\n\r\n- [ ] The risk has been created in the correct directory (`risks/MASVS-STORAGE/2-***-****/backup-unencrypted/risk.md`)\r\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\r\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\r\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\r\n", + "summary": "The issue outlines the task of creating a new risk entry titled \"Backup Unencrypted\" for the MASVS-STORAGE-2 category. This risk highlights the potential compromise of data confidentiality due to the lack of encryption for sensitive data in backups. \n\nThe implementation involves creating a markdown file at the specified directory with defined sections: Overview, Impact, Modes of Introduction, and Mitigations, following the provided guidelines. It also emphasizes the importance of including specific references and related tests from the MASTG.\n\nAcceptance criteria include ensuring the risk is correctly placed in the directory, content adheres to the guidelines, and at least one GitHub Issue is created for corresponding tests based on the identified modes of introduction. 
If accomplished, this will enhance the documentation and testing framework related to data backup risks.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2541", @@ -29170,6 +30178,7 @@ "node_id": "I_kwDOBCbAAs5-Sl6s", "title": "[MASWE-0004] Sensitive Data Not Excluded From Backup", "body": "## Description\r\n\r\nCreate a new risk for \"Sensitive Data Not Excluded From Backup (MASVS-STORAGE-2)\" using the following information:\r\n\r\nsensitive data can be excluded to prevent it from being backed up.\r\n\r\n\r\n\r\nCreate \"`risks/MASVS-STORAGE/2-***-****/data-not-excluded-backup/risk.md`\" including the following content:\r\n\r\n```yaml\r\n---\r\ntitle: Sensitive Data Not Excluded From Backup\r\nalias: data-not-excluded-backup\r\nplatform: [android, ios]\r\nprofiles: [L1, L2, P]\r\nmappings:\r\n masvs-v1: [MSTG-STORAGE-8]\r\n masvs-v2: [MASVS-STORAGE-2, MASVS-PRIVACY-1]\r\n mastg-v1: [MASTG-TEST-0058, MASTG-TEST-0009]\r\n\r\n---\r\n\r\n## Overview\r\n\r\n## Impact\r\n\r\n## Modes of Introduction\r\n\r\n## Mitigations\r\n\r\n```\r\n\r\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\r\n\r\nUse at least the following references:\r\n\r\n- \r\n- \r\n\r\nWhen creating the corresponding tests, use the following areas to guide you:\r\n\r\n- `android:fullBackupContent` (Android 11-) or `android:dataExtractionRules` (Android 12+)\r\n- iOS `isExcludedFromBackup` (iOS)\r\n- cryptographic keys in backups (?)\r\n\r\n**MASTG v1 Refactoring:**\r\n\r\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\r\n\r\n- [MASTG-TEST-0058 - Testing Backups for Sensitive Data (ios)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0058/)\r\n- [MASTG-TEST-0009 - Testing Backups for Sensitive Data (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/)\r\n\r\n## Acceptance Criteria\r\n\r\n- [ ] The risk has been created in the correct directory (`risks/MASVS-STORAGE/2-***-****/data-not-excluded-backup/risk.md`)\r\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\r\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\r\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\r\n", + "summary": "A new risk titled \"Sensitive Data Not Excluded From Backup\" needs to be created as part of the MASVS-STORAGE-2 specification. The structure for this risk should be placed in the directory `risks/MASVS-STORAGE/2-***-****/data-not-excluded-backup/risk.md` and must include specific sections such as Overview, Impact, Modes of Introduction, and Mitigations, following the guidelines from the provided document.\n\nAdditionally, references from Android’s documentation regarding backup exclusion should be incorporated, along with testing areas related to backups for sensitive data on both Android and iOS platforms. 
Acceptance criteria include correct directory placement, adherence to content guidelines, creation of corresponding test issues, and proper metadata linkage to MASTG v1 tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2542", @@ -29201,6 +30210,7 @@ "node_id": "I_kwDOBCbAAs5-Sl9Y", "title": "[MASWE-0008] Device Access Security Policy Not Enforced", "body": "## Description\r\n\r\nCreate a new risk for \"Device Access Security Policy Not Enforced (MASVS-STORAGE-1)\" using the following information:\r\n\r\nThe app may not enforce device access security policy (e.g. device passcode) and may allow for unauthorized access to sensitive data.\r\n\r\n\r\n\r\nCreate \"`risks/MASVS-STORAGE/1-***-****/device-access-policy-not-enforced/risk.md`\" including the following content:\r\n\r\n```yaml\r\n---\r\ntitle: Device Access Security Policy Not Enforced\r\nalias: device-access-policy-not-enforced\r\nplatform: [android, ios]\r\nprofiles: [L2]\r\nmappings:\r\n masvs-v1: [MSTG-STORAGE-11]\r\n masvs-v2: [MASVS-STORAGE-1, MASVS-AUTH-2]\r\n mastg-v1: [MASTG-TEST-0064, MASTG-TEST-0012, MASTG-TEST-0017]\r\n\r\n---\r\n\r\n## Overview\r\n\r\n## Impact\r\n\r\n## Modes of Introduction\r\n\r\n## Mitigations\r\n\r\n```\r\n\r\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\r\n\r\n\r\n\r\nWhen creating the corresponding tests, use the following areas to guide you:\r\n\r\n- user set a device passcode via isDeviceSecure() on Android\r\n- LAContext.canEvaluatePolicy() on iOS\r\n\r\n**MASTG v1 Refactoring:**\r\n\r\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\r\n\r\n- [MASTG-TEST-0064 - Testing Local Authentication (ios)](https://mas.owasp.org/MASTG/tests/android/MASVS-AUTH/MASTG-TEST-0064/)\r\n- [MASTG-TEST-0012 - Testing the Device-Access-Security Policy (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0012/)\r\n- [MASTG-TEST-0017 - Testing Confirm Credentials (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-AUTH/MASTG-TEST-0017/)\r\n\r\n## Acceptance Criteria\r\n\r\n- [ ] The risk has been created in the correct directory (`risks/MASVS-STORAGE/1-***-****/device-access-policy-not-enforced/risk.md`)\r\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\r\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\r\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\r\n\r\n", + "summary": "A new risk titled \"Device Access Security Policy Not Enforced\" needs to be created under the directory `risks/MASVS-STORAGE/1-***-****/device-access-policy-not-enforced/risk.md`. This risk addresses the potential for unauthorized access to sensitive data due to the app not enforcing device access security policies, such as passcodes. \n\nThe YAML file should include relevant metadata, and the risk documentation must follow the established guidelines. Additionally, tests should be derived based on specific methods for checking device security on Android and iOS. 
\n\nThe acceptance criteria include ensuring the risk is in the correct directory, follows guidelines, and that at least one GitHub issue is created for corresponding tests. The risk must also reference related MASTG v1 tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2546", @@ -29232,6 +30242,7 @@ "node_id": "I_kwDOBCbAAs5_QGKS", "title": "Review New Risk & Tests for insecure-random ", "body": "This is a preview of the new implementation of risks and tests for the MAS project.\r\n\r\nIn this issue we'd like to collect your feedback regarding:\r\n\r\n> MASVS-CRYPTO: [risks/MASVS-CRYPTO/1-strong-crypto/insecure-random](https://github.com/OWASP/owasp-mastg/tree/master/risks/MASVS-CRYPTO/1-strong-crypto/insecure-random)\r\n\r\n## Guidelines\r\n\r\nThe [guidelines for writing these new components are available here](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing\r\n) and are open for feedback. **Be sure to read them before providing feedback.**\r\n\r\n## Risks, Tests & Examples\r\n\r\nThe file structure is as follows: `risks/////example-*/`\r\n\r\n\r\nThis draft also includes 2 new components: [mitigations](https://github.com/OWASP/owasp-mastg/tree/master/mitigations) and [prerequisites](https://github.com/OWASP/owasp-mastg/tree/master/prerequisites). Feel free to review and provide feedback on these as well.\r\n\r\n## DISCLAIMER\r\n\r\n> This risk and test is a \"preview draft\", and therefore subject to change. We will be incorporating suggestions and new changes at any time until we finalize it.\r\n\r\n## How to provide feedback\r\n\r\nPlease include comments directly in this issue.\r\n\r\nFeedback about the Guidelines is also welcome: Please include comments on the guidelines directly [in the Google Doc](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing) using the [\"Comment\" function](https://support.google.com/docs/answer/65129?hl=en&co=GENIE.Platform%3DDesktop#zippy=%2Cadd-a-comment).", + "summary": "The issue seeks feedback on the new implementation of risks and tests related to insecure random generation for the MAS project. It outlines the structure for risks, tests, and examples, and introduces new components for mitigations and prerequisites. Contributors are encouraged to review the guidelines for writing these components and provide comments directly in the issue or on the linked Google Doc. As this is a preview draft, changes may occur based on the feedback received. It is important to engage with the provided documentation and structure to ensure clarity and effectiveness in the final implementation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2557", @@ -29260,6 +30271,7 @@ "node_id": "I_kwDOBCbAAs5_QG73", "title": "Review New Risk & Tests for sensitive-data-in-network-traffic", "body": "This is a preview of the new implementation of risks and tests for the MAS project.\r\n\r\nIn this issue we'd like to collect your feedback regarding:\r\n\r\n> MASVS-PRIVACY: [risks/MASVS-PRIVACY/1-data-minimization/sensitive-data-in-network-traffic](https://github.com/OWASP/owasp-mastg/tree/master/risks/MASVS-PRIVACY/1-data-minimization/sensitive-data-in-network-traffic)\r\n\r\n## Guidelines\r\n\r\nThe [guidelines for writing these new components are available here](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing\r\n) and are open for feedback. 
**Be sure to read them before providing feedback.**\r\n\r\n## Risks, Tests & Examples\r\n\r\nThe file structure is as follows: `risks/////example-*/`\r\n\r\n\r\nThis draft also includes 2 new components: [mitigations](https://github.com/OWASP/owasp-mastg/tree/master/mitigations) and [prerequisites](https://github.com/OWASP/owasp-mastg/tree/master/prerequisites). Feel free to review and provide feedback on these as well.\r\n\r\n## DISCLAIMER\r\n\r\n> This risk and test is a \"preview draft\", and therefore subject to change. We will be incorporating suggestions and new changes at any time until we finalize it.\r\n\r\n## How to provide feedback\r\n\r\nPlease include comments directly in this issue.\r\n\r\nFeedback about the Guidelines is also welcome: Please include comments on the guidelines directly [in the Google Doc](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing) using the [\"Comment\" function](https://support.google.com/docs/answer/65129?hl=en&co=GENIE.Platform%3DDesktop#zippy=%2Cadd-a-comment).", + "summary": "The issue is focused on gathering feedback for the new implementation of risks and tests related to sensitive data in network traffic for the MAS project. Contributors are encouraged to review the draft components of MASVS-PRIVACY, which includes a structured file organization and two new components: mitigations and prerequisites. Guidelines for providing feedback are provided, and users are reminded to make comments directly in the issue as well as on the linked Google Doc. Since this is a preview draft, it is subject to changes based on the suggestions received. Reviewers should carefully examine the guidelines before giving feedback.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2558", @@ -29288,6 +30300,7 @@ "node_id": "I_kwDOBCbAAs5_QHw_", "title": "Review New Risk & Tests for data-in-logs", "body": "This is a preview of the new implementation of risks and tests for the MAS project.\r\n\r\nIn this issue we'd like to collect your feedback regarding:\r\n\r\n> MASVS-STORAGE: [risks/MASVS-STORAGE/2-prevent-data-leakage/data-in-logs](https://github.com/OWASP/owasp-mastg/tree/master/risks/MASVS-STORAGE/2-prevent-data-leakage/data-in-logs)\r\n\r\n## Guidelines\r\n\r\nThe [guidelines for writing these new components are available here](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing\r\n) and are open for feedback. **Be sure to read them before providing feedback.**\r\n\r\n## Risks, Tests & Examples\r\n\r\nThe file structure is as follows: `risks/////example-*/`\r\n\r\n\r\nThis draft also includes 2 new components: [mitigations](https://github.com/OWASP/owasp-mastg/tree/master/mitigations) and [prerequisites](https://github.com/OWASP/owasp-mastg/tree/master/prerequisites). Feel free to review and provide feedback on these as well.\r\n\r\n## DISCLAIMER\r\n\r\n> This risk and test is a \"preview draft\", and therefore subject to change. 
We will be incorporating suggestions and new changes at any time until we finalize it.\r\n\r\n## How to provide feedback\r\n\r\nPlease include comments directly in this issue.\r\n\r\nFeedback about the Guidelines is also welcome: Please include comments on the guidelines directly [in the Google Doc](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing) using the [\"Comment\" function](https://support.google.com/docs/answer/65129?hl=en&co=GENIE.Platform%3DDesktop#zippy=%2Cadd-a-comment).", + "summary": "The issue seeks feedback on the new implementation of risks and tests for the MAS project, specifically focusing on the prevention of data leakage in logs as outlined in the MASVS-STORAGE guidelines. Reviewers are encouraged to familiarize themselves with the provided guidelines before commenting. The draft includes a new file structure for risks, tests, and examples, along with additional components for mitigations and prerequisites. Feedback on both the draft and the guidelines is welcomed, with comments requested directly in the issue or the linked Google Doc. It's important to note that this draft is subject to change based on the feedback received.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2559", @@ -29316,6 +30329,7 @@ "node_id": "I_kwDOBCbAAs5_QQh9", "title": "MASTG Refactor Part 3: Knowledge", "body": "As a follow-up to https://github.com/OWASP/owasp-mastg/pull/2439, we'd like to refactor our knowledge chapters into individual components as well.\r\n\r\n- They should live in a new `knowledge/` folder and are now identified by the IDs MASTG-KNOW-XXXX (alternatively we could use an alias).\r\n- Once all the content is split, remove the original chapters.\r\n- Make sure the scripts rebuild the content for the PDF\r\n- Update mkdocs.yml\r\n\r\nThe target chapters are (see https://github.com/OWASP/owasp-mastg/tree/master/Document):\r\n\r\n- 0x04c-0x04h\r\n- 0x05d-0x05j\r\n- 0x06d-0x06j\r\n\r\n> Please contact us before picking up this issue. It may need more clarification. We'll add any other specific details to this description.", + "summary": "The issue is focused on refactoring the knowledge chapters of the MASTG project into individual components, which will be organized in a new `knowledge/` folder and assigned unique IDs (MASTG-KNOW-XXXX). The original chapters should be removed after the content is split. Additionally, scripts need to be updated to ensure that the content can be rebuilt for the PDF, and the `mkdocs.yml` file should be revised accordingly. The specific chapters targeted for this refactor are listed in the document link provided. It's advised to reach out for clarification before beginning work on this issue.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2560", @@ -29344,6 +30358,7 @@ "node_id": "I_kwDOBCbAAs6A7tRl", "title": "[MASWE-0010] Weak Cryptographic Key Derivation", "body": "\n\n## Description\n\nCreate a new risk for \"Weak Cryptographic Key Derivation (MASVS-CRYPTO-2)\" using the following information:\n\ne.g. 
PBKDF2 with insufficient iterations, lack of salt, etc.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/2-***-****/weak-crypto-key-derivation/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Weak Cryptographic Key Derivation\nalias: weak-crypto-key-derivation\nplatform: [android, ios]\nprofiles: [L1, L2]\nmappings:\n masvs-v1: [MSTG-CRYPTO-2]\n masvs-v2: [MASVS-CRYPTO-2]\n mastg-v1: [MASTG-TEST-0061, MASTG-TEST-0014]\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- weak sources\n- lack of salt encryption when doing PBKDF2\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n- [MASTG-TEST-0061 - Verifying the Configuration of Cryptographic Standard Algorithms (ios)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0061/)\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/2-***-****/weak-crypto-key-derivation/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "A new risk titled \"Weak Cryptographic Key Derivation\" needs to be created for the MASVS-CRYPTO-2 category. The creation involves drafting a markdown file at the specified directory, outlining sections including Overview, Impact, Modes of Introduction, and Mitigations. The content should adhere to the established guidelines and reference resources, particularly the NIST publication on cryptographic standards. \n\nAdditionally, the task requires examining related tests from the MASTG for input on defining risks and tests, with a specific focus on weaknesses like insufficient PBKDF2 iterations and lack of salt. Acceptance criteria include ensuring the risk file is correctly located, follows guidelines, and that at least one GitHub issue is opened for related tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2574", @@ -29375,6 +30390,7 @@ "node_id": "I_kwDOBCbAAs6A7tSN", "title": "[MASWE-0011] Cryptographic Key Rotation Not Implemented", "body": "\n\n## Description\n\nCreate a new risk for \"Cryptographic Key Rotation Not Implemented (MASVS-CRYPTO-2)\" using the following information:\n\nKey rotation is a best practice to limit the impact of a key compromise. 
It is especially important for long-lived keys such as asymmetric keys.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/2-***-****/no-key-rotation/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Cryptographic Key Rotation Not Implemented\nalias: no-key-rotation\nplatform: [android, ios]\nprofiles: [L2]\nmappings:\n masvs-v2: [MASVS-CRYPTO-2]\n mastg-v1: []\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- long-lived keys (cryptoperiods as per NIST.SP.800-57pt1r5)\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/2-***-****/no-key-rotation/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "A new risk needs to be created for \"Cryptographic Key Rotation Not Implemented (MASVS-CRYPTO-2)\" to emphasize the importance of key rotation in limiting the impact of key compromise, especially for long-lived keys. \n\nThe task involves creating a file at \"`risks/MASVS-CRYPTO/2-***-****/no-key-rotation/risk.md`\" with specified YAML content and sections including Overview, Impact, Modes of Introduction, and Mitigations. The sections should adhere to the provided guidelines and incorporate relevant references, particularly focusing on long-lived keys as per NIST guidelines.\n\nAdditionally, acceptance criteria must be fulfilled, such as ensuring the risk is in the correct directory, content meets guidelines, and at least one GitHub Issue is created for associated tests derived from the \"Modes of Introduction.\"", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2575", @@ -29404,6 +30420,7 @@ "node_id": "I_kwDOBCbAAs6A7tSw", "title": "[MASWE-0012] Insecure or Wrong Usage of Cryptographic Key", "body": "\r\n## Description\r\n\r\nCreate a new risk for \"Insecure or Wrong Usage of Cryptographic Key (MASVS-CRYPTO-2)\" using the following information:\r\n\r\nAccording to NIST SP 800-57 Part 1 Revision 5, Section 5.2 on Key Usage, it is recommended that a single key be dedicated to a single purpose, such as encryption, integrity authentication, key wrapping, random bit generation, or digital signatures. This recommendation is based on a number of considerations:\r\n\r\n- Using the same key for different cryptographic processes can compromise the security of those processes.\r\n- The potential damage from a compromised key is minimized if its use is limited.\r\n- Different uses of keys can conflict, reducing their effectiveness. 
For example, a key used for both key transport and digital signatures may have conflicting retention and destruction requirements due to their different cryptoperiods.\r\n\r\nHowever, NIST also recognizes exceptions where a single process using a key can effectively perform multiple roles, such as a digital signature that provides both integrity and source authentication, or a symmetric key used for authenticated encryption that simultaneously encrypts and authenticates data. This approach emphasizes security efficiency without compromising the integrity of the key and the data it protects.\r\n\r\n\r\n\r\nCreate \"`risks/MASVS-CRYPTO/2-***-****/insecure-key-usage/risk.md`\" including the following content:\r\n\r\n```yaml\r\n---\r\ntitle: Insecure or Wrong Usage of Cryptographic Key\r\nalias: insecure-key-usage\r\nplatform: [android, ios]\r\nprofiles: [L2]\r\nmappings:\r\n masvs-v1: [MSTG-CRYPTO-5]\r\n masvs-v2: [MASVS-CRYPTO-2]\r\n mastg-v1: [MASTG-TEST-0062, MASTG-TEST-0015]\r\n\r\n---\r\n\r\n## Overview\r\n\r\n## Impact\r\n\r\n## Modes of Introduction\r\n\r\n## Mitigations\r\n\r\n```\r\n\r\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\r\n\r\nUse at least the following references:\r\n\r\n- \r\n\r\nWhen creating the corresponding tests, use the following areas to guide you:\r\n\r\n- authorized key algorithm\r\n- key reuse for different purposes or operations (encrypt, decrypt, sign,...)\r\n\r\n**MASTG v1 Refactoring:**\r\n\r\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\r\n\r\n- [MASTG-TEST-0062 - Testing Key Management (ios)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0062/)\r\n- [MASTG-TEST-0015 - Testing the Purposes of Keys (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0015/)\r\n\r\n## Acceptance Criteria\r\n\r\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/2-***-****/insecure-key-usage/risk.md`)\r\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\r\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\r\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\r\n\r\n ", + "summary": "A new risk titled \"Insecure or Wrong Usage of Cryptographic Key (MASVS-CRYPTO-2)\" needs to be created. The risk will emphasize the importance of using dedicated keys for specific cryptographic purposes, as recommended by NIST guidelines. The new risk should be documented in the specified directory with a YAML structure that includes an overview, impact, modes of introduction, and mitigations sections. \n\nAdditionally, references to NIST documentation and relevant MASTG tests must be incorporated. It’s essential to ensure that the risk file adheres to the provided writing guidelines and that corresponding GitHub issues for the related tests are created. 
\n\nTo complete this task, follow the outlined structure and ensure all acceptance criteria are met.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2576", @@ -29433,6 +30450,7 @@ "node_id": "I_kwDOBCbAAs6A7tTu", "title": "[MASWE-0014] Cryptographic Keys Not Properly Protected at Rest", "body": "## Description\r\n\r\nCreate a new weakness for \"Cryptographic Keys Not Properly Protected at Rest (MASVS-CRYPTO-2)\" using the following information:\r\n\r\ne.g. storing keys in SharedPreferences, storing keys in files, hardcoded keys, etc.\r\n\r\n\r\n\r\nUpdate https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CRYPTO/MASWE-0014.md including the following content:\r\n\r\n```md\r\n\r\n## Overview\r\n\r\n## Impact\r\n\r\n## Modes of Introduction\r\n\r\n## Mitigations\r\n\r\n```\r\n\r\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**. Use other MASWE items in `status: new` for reference. For example: MASWE-0027\r\n\r\nUse at least the following references (read and understand them to create the content):\r\n\r\n- \r\n\r\nWhen creating the corresponding tests, use the following areas to guide you:\r\n\r\n- platform keystore (Android KeyStore / iOS KeyChain)\r\n- TEE/SE\r\n- Cryptographic Keys Not Encrypted with key from platform keystore. envelope encryption (DEK+KEK) (considered \"equivalent protection\")\r\n- Key Wrapping (NIST.SP.800-175Br1 5.3.5)\r\n\r\n**MASTG v1 Refactoring:**\r\n\r\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define the weakness, especially the \"Modes of Introduction\" section since it indicates how the weakness can happen and therefore represents things we need to test for.\r\n\r\n- [MASTG-TEST-0052 - Testing Local Data Storage (ios)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0052/)\r\n- [MASTG-TEST-0001 - Testing Local Storage for Sensitive Data (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0001/)\r\n\r\n ", + "summary": "The issue discusses the need to create a new weakness entry for \"Cryptographic Keys Not Properly Protected at Rest (MASVS-CRYPTO-2).\" The proposed content includes adding sections for Overview, Impact, Modes of Introduction, and Mitigations in the existing documentation. Reference materials, including NIST guidelines, are suggested for inclusion to ensure comprehensive coverage. Specific examples of keys storage vulnerabilities, such as using SharedPreferences or hardcoding keys, should be highlighted. Additionally, guidance on tests related to key management in various platforms is provided, emphasizing the importance of encryption methods and proper key handling practices. 
The task involves following established guidelines and using relevant references to ensure thoroughness in documenting the weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2578", @@ -29464,6 +30482,7 @@ "node_id": "I_kwDOBCbAAs6A7tUI", "title": "[MASWE-0015] Deprecated Android KeyStore Implementations", "body": "\n\n## Description\n\nCreate a new risk for \"Deprecated Android KeyStore Implementations (MASVS-CRYPTO-2)\" using the following information:\n\nAvoid deprecated implementations such as BKS\n\n\n\nCreate \"`risks/MASVS-CRYPTO/2-***-****/deprecated-keystore/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Deprecated Android KeyStore Implementations\nalias: deprecated-keystore\nplatform: [android]\nprofiles: [L2]\nmappings:\n masvs-v1: [MSTG-CRYPTO-4]\n masvs-v2: [MASVS-CRYPTO-2, MASVS-CODE-3]\n mastg-v1: [MASTG-TEST-0014]\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- Bouncy Castle (BKS)\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/2-***-****/deprecated-keystore/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "A new risk related to \"Deprecated Android KeyStore Implementations\" needs to be created under the category MASVS-CRYPTO-2. The task involves creating a markdown file in the specified directory with structured content, including an overview, impact, modes of introduction, and mitigations. It's important to reference specific documentation and guidelines while ensuring the file is correctly formatted. Additionally, at least one GitHub issue should be generated for corresponding tests pertaining to the identified risk. 
The acceptance criteria outline the necessary steps to verify the completion of this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2579", @@ -29495,6 +30514,7 @@ "node_id": "I_kwDOBCbAAs6A7tUl", "title": "[MASWE-0016] Unsafe Handling of Imported Cryptographic Keys", "body": "\n\n## Description\n\nCreate a new risk for \"Unsafe Handling of Imported Cryptographic Keys (MASVS-CRYPTO-2)\" using the following information:\n\nImporting keys without validating their origin or integrity, or using insecure custom key exchange protocols, can inadvertently introduce malicious or compromised keys into the app environment.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/2-***-****/unsafe-imported-key-handling/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Unsafe Handling of Imported Cryptographic Keys\nalias: unsafe-imported-key-handling\nplatform: [android, ios]\nprofiles: [L2]\nmappings:\n masvs-v2: [MASVS-CRYPTO-2, MASVS-CODE-4]\n mastg-v1: []\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n- \n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- key import from untrusted sources\n- key import from untrusted storage\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/2-***-****/unsafe-imported-key-handling/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "A new risk titled \"Unsafe Handling of Imported Cryptographic Keys\" needs to be created for the MASVS-CRYPTO-2 category. This risk highlights the dangers of importing cryptographic keys without proper validation, which can lead to the introduction of compromised keys into the app.\n\nThe task involves creating a markdown file in the specified directory with structured content that includes sections for Overview, Impact, Modes of Introduction, and Mitigations. Guidelines for writing the content are provided, along with references that need to be included. Additionally, tests related to key imports from untrusted sources and storage should be developed based on the identified modes of introduction.\n\nAcceptance criteria include ensuring the markdown file is correctly placed, adheres to guidelines, and at least one GitHub issue for the tests is created. 
The risk must also reference related MASTG v1 tests in its metadata.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2580", @@ -29524,6 +30544,7 @@ "node_id": "I_kwDOBCbAAs6A7tVH", "title": "[MASWE-0017] Cryptographic Keys Not Properly Protected on Export", "body": "\n\n## Description\n\nCreate a new risk for \"Cryptographic Keys Not Properly Protected on Export (MASVS-CRYPTO-2)\" using the following information:\n\nBefore exporting, keys should be \"wrapped\" or encrypted with another key. This process ensures that the cryptographic key is protected during and after export. This is true even if the key is sent over a secure channel.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/2-***-****/crypto-keys-not-protected-export/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Cryptographic Keys Not Properly Protected on Export\nalias: crypto-keys-not-protected-export\nplatform: [android, ios]\nprofiles: [L2]\nmappings:\n masvs-v2: [MASVS-CRYPTO-2, MASVS-STORAGE-1, MASVS-NETWORK-1]\n mastg-v1: []\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n- \n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- key wrapping (NIST.SP.800-175Br1 5.3.5)\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/2-***-****/crypto-keys-not-protected-export/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "The issue outlines the need to create a new risk entry titled \"Cryptographic Keys Not Properly Protected on Export\" under the MASVS-CRYPTO category. The entry must highlight the importance of wrapping or encrypting cryptographic keys before export to ensure their protection, even when transmitted over secure channels.\n\nA specific file structure is required for the risk documentation, and it should include sections for Overview, Impact, Modes of Introduction, and Mitigations. 
References and guidelines are provided for completing these sections.\n\nAdditionally, corresponding tests related to key wrapping must be created, and the acceptance criteria include the proper placement of the risk file, adherence to content guidelines, and the linkage of related MASTG v1 tests.\n\nTo proceed, the required file needs to be created and populated as specified, along with the related tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2581", @@ -29553,6 +30574,7 @@ "node_id": "I_kwDOBCbAAs6A7tVm", "title": "[MASWE-0018] Cryptographic Keys Access Not Restricted", "body": "\n\n## Description\n\nCreate a new risk for \"Cryptographic Keys Access Not Restricted (MASVS-CRYPTO-2)\" using the following information:\n\nEnsuring that cryptographic keys are accessible only under strict conditions, such as when the device is unlocked by an authenticated user, within secure application contexts, or for limited periods of time, is critical to maintaining the confidentiality and integrity of encrypted data.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/2-***-****/crypto-key-access-not-restricted/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Cryptographic Keys Access Not Restricted\nalias: crypto-key-access-not-restricted\nplatform: [android, ios]\nprofiles: [L2]\nmappings:\n masvs-v2: [MASVS-CRYPTO-2, MASVS-AUTH-2, MASVS-AUTH-3]\n mastg-v1: []\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n- \n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- from a Background Process\n- locked device (iOS kSecAttrAccessibleWhenUnlockedThisDeviceOnly, Android setUnlockedDeviceRequired)\n- time-based access (duration)\n- Require User Presence\n- application-specific password\n- biometric authentication\n- key use restricted e.g. requiring user auth with biometrics, User Presence.\n- especially for sensitive operations\n- keys restricted/authorized for a duration of time or specific crypto operation, etc.\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/2-***-****/crypto-key-access-not-restricted/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "The issue outlines the necessity to create a new risk related to the accessibility of cryptographic keys, specifically \"Cryptographic Keys Access Not Restricted (MASVS-CRYPTO-2).\" The task involves developing a markdown file that includes various sections such as Overview, Impact, Modes of Introduction, and Mitigations, following specific guidelines. 
\n\nAdditionally, the creator has provided references and highlighted areas to focus on when constructing tests for this risk, including scenarios involving device locks, user authentication, and time-based access. \n\nTo complete the task, the following should be ensured:\n- The risk file is placed in the specified directory.\n- The content adheres to established guidelines.\n- At least one GitHub issue is created for related tests.\n- The risk file references relevant MASTG v1 tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2582", @@ -29582,6 +30604,7 @@ "node_id": "I_kwDOBCbAAs6A7tWs", "title": "[MASWE-0020] Weak Encryption", "body": "\r\n## Description\r\n\r\nCreate a new risk for \"Weak Encryption (MASVS-CRYPTO-1)\" using the following information:\r\n\r\nThe use of outdated encryption methods like DES and 3DES may compromise data confidentiality and integrity.\r\n\r\n\r\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\r\n\r\nUse at least the following references:\r\n\r\n- \r\n- \r\n- \r\n- \r\n- \r\n\r\nWhen creating the corresponding tests, use the following areas to guide you:\r\n\r\n- Weak encryption algorithms (e.g. DES, 3DES, etc.)\r\n- Weak encryption modes (e.g. ECB, etc.)\r\n- Cipher.getInstance(\"AES\") defaults to ECB (Android)\r\n\r\n**MASTG v1 Refactoring:**\r\n\r\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\r\n\r\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\r\n\r\n\r\n ", + "summary": "The issue requests the creation of a new risk entry for \"Weak Encryption (MASVS-CRYPTO-1)\" due to the potential compromise of data confidentiality and integrity when using outdated encryption methods like DES and 3DES. It provides guidelines for structuring the risk based on the \"Writing MASTG Risks & Tests\" document and lists specific references to support the information. Additionally, it outlines areas to focus on when developing corresponding tests, including weak encryption algorithms and modes. There's also a mention of using a MASVS v1 ID to find related tests for refining the risk and tests. 
\n\nTo proceed, one should compile the required information into the relevant sections while adhering to the provided guidelines and references.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2584", @@ -29613,6 +30636,7 @@ "node_id": "I_kwDOBCbAAs6A7tXP", "title": "[MASWE-0021] Weak Hashing", "body": "\n\n## Description\n\nCreate a new risk for \"Weak Hashing (MASVS-CRYPTO-1)\" using the following information:\n\nUtilizing weak hashing algorithms such as MD5 and SHA1 in a security sensitive context may compromise data integrity and authenticity.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/1-***-****/weak-hashing/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Weak Hashing\nalias: weak-hashing\nplatform: [android, ios]\nprofiles: [L1, L2]\nmappings:\n masvs-v1: [MSTG-CRYPTO-4]\n masvs-v2: [MASVS-CRYPTO-1]\n mastg-v1: [MASTG-TEST-0014]\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- Weak hashing algorithms (e.g. MD5, SHA1, etc.)\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/1-***-****/weak-hashing/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "The issue requests the creation of a new risk category for \"Weak Hashing (MASVS-CRYPTO-1)\" due to the potential compromise of data integrity and authenticity when weak hashing algorithms like MD5 and SHA1 are used in security-sensitive contexts. \n\nA new markdown file should be created in the specified directory structure, including sections for Overview, Impact, Modes of Introduction, and Mitigations. The content must adhere to the provided guidelines and incorporate references related to cryptography. Additionally, tests related to this risk need to be defined, referencing the appropriate MASTG tests.\n\nAcceptance criteria include ensuring the file is in the correct directory, following the guidelines, creating at least one GitHub Issue for the tests, and indicating the related MASTG v1 tests in the metadata. 
\n\nTo proceed, gather the necessary information and references to complete the markdown file and associated tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2585", @@ -29644,6 +30668,7 @@ "node_id": "I_kwDOBCbAAs6A7tXy", "title": "[MASWE-0022] Predictable Initialization Vectors (IVs)", "body": "\n\n## Description\n\nCreate a new risk for \"Predictable Initialization Vectors (IVs) (MASVS-CRYPTO-1)\" using the following information:\n\nThe use of predictable IVs (hardcoded, null, reused) in a security sensitive context can weaken data encryption strength and potentially compromise confidentiality.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/1-***-****/predictable-ivs/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Predictable Initialization Vectors (IVs)\nalias: predictable-ivs\nplatform: [android, ios]\nprofiles: [L1, L2]\nmappings:\n masvs-v1: [MSTG-CRYPTO-4]\n masvs-v2: [MASVS-CRYPTO-1]\n mastg-v1: [MASTG-TEST-0014]\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- not use the IvParameterSpec.class anymore for GCM, use the GCMParameterSpec.class instead (Android)\n- Hardcoded IVs\n- Null IVs\n- Reused IVs\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/1-***-****/predictable-ivs/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "A new risk titled \"Predictable Initialization Vectors (IVs) (MASVS-CRYPTO-1)\" needs to be created, highlighting the security concerns associated with the use of predictable IVs in encryption. The task involves creating a risk file at the specified directory and including sections for overview, impact, modes of introduction, and mitigations, following the provided guidelines. Relevant references and test areas related to IV usage in cryptography should also be incorporated.\n\nAcceptance criteria include ensuring the file is in the correct directory, adheres to the content guidelines, and at least one GitHub issue is created for corresponding tests linked to the modes of introduction. 
Additionally, the risk should reference related MASTG v1 tests in its metadata.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2586", @@ -29675,6 +30700,7 @@ "node_id": "I_kwDOBCbAAs6A7tYV", "title": "[MASWE-0023] Weak Padding", "body": "\n\n## Description\n\nCreate a new risk for \"Weak Padding (MASVS-CRYPTO-1)\" using the following information:\n\nThe use of weak padding such as NoPadding, ZeroPadding, etc. in a security sensitive context should be avoided to ensure the integrity and authenticity of the data.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/1-***-****/weak-padding/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Weak Padding\nalias: weak-padding\nplatform: [android, ios]\nprofiles: [L1, L2]\nmappings:\n masvs-v1: [MSTG-CRYPTO-4]\n masvs-v2: [MASVS-CRYPTO-1]\n mastg-v1: [MASTG-TEST-0014]\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n- \n- \n\nWhen creating the corresponding tests, use the following areas to guide you:\n\n- NoPadding\n- PKCS1-v1_5\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/1-***-****/weak-padding/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "The issue outlines the need to create a new risk entry for \"Weak Padding (MASVS-CRYPTO-1)\" to address the use of weak padding methods like NoPadding and ZeroPadding in security-sensitive contexts. It specifies the creation of a risk file located at `risks/MASVS-CRYPTO/1-***-****/weak-padding/risk.md`, which should include structured content such as an overview, impact, modes of introduction, and mitigations, following the established guidelines.\n\nAdditionally, references to relevant security documents are provided, and there is a requirement to create corresponding tests based on specific areas like NoPadding and PKCS1-v1_5. Acceptance criteria are defined to ensure the risk is properly documented and linked to related tests.\n\nTo proceed, the focus should be on drafting the risk content in the specified format and ensuring all acceptance criteria are met, particularly linking to existing tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2587", @@ -29706,6 +30732,7 @@ "node_id": "I_kwDOBCbAAs6A7tZL", "title": "[MASWE-0024] Weak Message Authentication Codes (MAC)", "body": "\n\n## Description\n\nCreate a new risk for \"Weak Message Authentication Codes (MAC) (MASVS-CRYPTO-1)\" using the following information:\n\nThe use of weak MAC such as HmacMD5, etc. 
in a security sensitive context may expose cryptographic vulnerabilities, affecting data integrity.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/1-***-****/weak-mac/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Weak Message Authentication Codes (MAC)\nalias: weak-mac\nplatform: [android, ios]\nprofiles: [L1, L2]\nmappings:\n masvs-v1: [MSTG-CRYPTO-4]\n masvs-v2: [MASVS-CRYPTO-1]\n mastg-v1: [MASTG-TEST-0014]\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n\n\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/1-***-****/weak-mac/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "The issue is focused on creating a new risk for \"Weak Message Authentication Codes (MAC)\" as defined in the MASVS-CRYPTO framework. It highlights that using weak MACs, like HmacMD5, in sensitive contexts can lead to cryptographic vulnerabilities that jeopardize data integrity. \n\nThe task involves writing a risk document in the specified directory format and completing sections such as Overview, Impact, Modes of Introduction, and Mitigations, while adhering to provided guidelines. Additionally, relevant references must be included, and related MASTG tests should be utilized to inform the risk and tests.\n\nAcceptance criteria include ensuring the document is correctly placed, content follows the guidelines, at least one GitHub issue for associated tests is created, and the document includes references to related MASTG v1 tests. \n\nTo move forward, the focus should be on drafting the risk document and ensuring all criteria are met.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2588", @@ -29735,6 +30762,7 @@ "node_id": "I_kwDOBCbAAs6A7taJ", "title": "[MASWE-0025] Weak Signature", "body": "\n\n## Description\n\nCreate a new risk for \"Weak Signature (MASVS-CRYPTO-1)\" using the following information:\n\nThe use of weak signature such as SHA1withRSA, etc. 
in a security sensitive context should be avoided to ensure the integrity and authenticity of the data.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/1-***-****/weak-signatures/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Weak Signature\nalias: weak-signatures\nplatform: [android, ios]\nprofiles: [L1, L2]\nmappings:\n masvs-v1: [MSTG-CRYPTO-4]\n masvs-v2: [MASVS-CRYPTO-1]\n mastg-v1: [MASTG-TEST-0014]\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n- \n\n\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/1-***-****/weak-signatures/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "The issue requests the creation of a new risk entry for \"Weak Signature (MASVS-CRYPTO-1)\" to address the use of weak signatures like SHA1withRSA in security-sensitive contexts. The task involves developing a risk markdown file with specified content and structure, including an overview, impact, modes of introduction, and mitigations. References to relevant documentation and guidelines for writing risks and tests are provided. Acceptance criteria include correct file placement, adherence to content guidelines, and linking to corresponding tests. 
The next steps involve compiling the required information and ensuring compliance with the outlined criteria.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2589", @@ -29764,6 +30792,7 @@ "node_id": "I_kwDOBCbAAs6A7ta6", "title": "[MASWE-0026] Improper Verification of Cryptographic Signature", "body": "\n\n## Description\n\nCreate a new risk for \"Improper Verification of Cryptographic Signature (MASVS-CRYPTO-1)\" using the following information:\n\nCryptographic signature verification should be performed properly to ensure the integrity and authenticity of the data.\n\n\n\nCreate \"`risks/MASVS-CRYPTO/1-***-****/improper-signature-verification/risk.md`\" including the following content:\n\n```yaml\n---\ntitle: Improper Verification of Cryptographic Signature\nalias: improper-signature-verification\nplatform: [android, ios]\nprofiles: [L1, L2]\nmappings:\n masvs-v1: [MSTG-CRYPTO-4]\n masvs-v2: [MASVS-CRYPTO-1]\n mastg-v1: [MASTG-TEST-0014]\n\n---\n\n## Overview\n\n## Impact\n\n## Modes of Introduction\n\n## Mitigations\n\n```\n\n**To complete the sections follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n\nUse at least the following references:\n\n- \n- \n\n\n\n**MASTG v1 Refactoring:**\n\nIf the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.\n\n- [MASTG-TEST-0014 - Testing the Configuration of Cryptographic Standard Algorithms (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/)\n\n## Acceptance Criteria\n\n- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/1-***-****/improper-signature-verification/risk.md`)\n- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)\n- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from \"Modes of Introduction\")\n- [ ] The risk indicates the related MASTG v1 tests in its metadata.\n\n ", + "summary": "A new risk needs to be created for \"Improper Verification of Cryptographic Signature (MASVS-CRYPTO-1).\" The risk should be documented in a specified directory and must include an overview, impact, modes of introduction, and mitigations, following established guidelines. References to relevant resources such as CWE and NIST publications should be included. Additionally, related tests from MASTG v1 need to be identified and linked in the metadata. Acceptance criteria include correct directory placement, adherence to guidelines, creation of at least one corresponding GitHub issue for tests, and proper documentation of related MASTG tests.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2590", @@ -29793,6 +30822,7 @@ "node_id": "I_kwDOBCbAAs6ECDt9", "title": "[tool] Add Drozer to Android Testing Guide back ", "body": "Recently a new version of Drozer has been released [Drozer 3.0.0](https://github.com/WithSecureLabs/drozer/releases/tag/3.0.0). 
It supports Python 3 and modern Java per their release notes:\r\n`Compatibility with Python 3 and modern Java.`\r\n\r\nSo it makes sense to bring it back to the guide since it is more convenient for the security testing than ADB.", + "summary": "The issue discusses the need to reintroduce Drozer into the Android Testing Guide following the release of Drozer 3.0.0, which is compatible with Python 3 and modern Java. The author suggests that Drozer is a more convenient tool for security testing compared to ADB. The proposed action is to update the guide to include the latest information about Drozer.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2598", @@ -29824,6 +30854,7 @@ "node_id": "I_kwDOBCbAAs6HzIBL", "title": "[Tool] Add blint for SBOM", "body": "See https://github.com/owasp-dep-scan/blint\n\n```\nblint sbom -i /path/to/apk -o bom.json --deep\n```", + "summary": "The issue suggests adding a tool called \"blint\" for generating Software Bill of Materials (SBOM) in the project. It references the blint documentation and provides a command example for its usage. To address this issue, the implementation of blint integration should be considered, allowing users to generate SBOMs from APK files.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2613", @@ -29853,6 +30884,7 @@ "node_id": "I_kwDOBCbAAs6JbXsm", "title": "[Tool] Add blutter?", "body": "Evaluate before adding. Can it be listed as an alternative of reflutter? Or is it essentially different?\n\n> B(l)utter: Flutter Mobile Application Reverse Engineering Tool by Compiling Dart AOT Runtime. Currently the application supports only Android libapp.so (arm64 only). Also the application is currently work only against recent Dart versions.\n\nhttps://github.com/worawit/blutter\n\n[hitbsecconf2023 slides](https://conference.hitb.org/hitbsecconf2023hkt/materials/D2%2520COMMSEC%2520-%2520B(l)utter%2520%25E2%2580%2593%2520Reversing%2520Flutter%2520Applications%2520by%2520using%2520Dart%2520Runtime%2520-%2520Worawit%2520Wangwarunyoo.pdf)\n\nhttps://youtu.be/RtKOe8HQy8Q?si=B59P5hQXVrk5J7Sw", + "summary": "The issue discusses the potential addition of a tool called B(l)utter, which is a Flutter mobile application reverse engineering tool that compiles the Dart AOT runtime. Currently, it only supports Android's libapp.so for arm64 architecture and works with recent Dart versions. The main inquiry is whether B(l)utter can be considered an alternative to Reflutter or if it serves a fundamentally different purpose. Evaluation of its capabilities and compatibility with existing tools is suggested before any decision is made.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2619", @@ -29883,6 +30915,7 @@ "node_id": "I_kwDOBCbAAs6OsnEa", "title": "[tool] sideloadly", "body": "Add sideloadly and link on installing IPAs page (techniques/ios/MASTG-TECH-0056.md).", + "summary": "The issue requests the addition of Sideloadly to the installation methods for IPAs on the specified documentation page (techniques/ios/MASTG-TECH-0056.md). 
To address this, Sideloadly should be integrated into the page along with appropriate linking and instructions for its use.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2657", @@ -29911,6 +30944,7 @@ "node_id": "I_kwDOBCbAAs6PEvaV", "title": "[MASWE-0028] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0028\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0028.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0028/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness labeled \"MASWE-0028\" needs to be created, with details provided in the linked GitHub repository file and the corresponding website page. It's essential to adhere to the specified MAS guidelines during the creation process. Further action involves drafting the content according to these guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2667", @@ -29926,8 +30960,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -29940,6 +30974,7 @@ "node_id": "I_kwDOBCbAAs6PEvbF", "title": "[MASWE-0029] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0029\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0029.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0029/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0029,\" needs to be created as detailed in the provided GitHub repository and website page links. The task requires adherence to the MAS guidelines available through the specified document. It's essential to follow these guidelines closely while developing the new entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2668", @@ -29955,8 +30990,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -29969,6 +31004,7 @@ "node_id": "I_kwDOBCbAAs6PEvcA", "title": "[MASWE-0030] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0030\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0030.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0030/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0030,\" has been identified and needs to be documented. The relevant information can be found in the provided GitHub repository file and website page. 
It is important to follow the specified MAS guidelines for the documentation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2669", @@ -29984,8 +31020,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -29998,6 +31034,7 @@ "node_id": "I_kwDOBCbAAs6PEvcz", "title": "[MASWE-0031] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0031\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0031.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0031/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0031,\" needs to be created. The relevant details can be found in the provided GitHub repository file and the corresponding website page. It is essential to adhere to the specified MAS guidelines when creating this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2670", @@ -30013,8 +31050,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30027,6 +31064,7 @@ "node_id": "I_kwDOBCbAAs6PEvdm", "title": "[MASWE-0032] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0032\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0032.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0032/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0032,\" needs to be created. Relevant information can be found in the designated GitHub repository file and the website page. To proceed, it is important to follow the provided MAS guidelines for the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2671", @@ -30042,8 +31080,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30056,6 +31094,7 @@ "node_id": "I_kwDOBCbAAs6PEveN", "title": "[MASWE-0033] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0033\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0033.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0033/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0033,\" needs to be created. Reference materials include a GitHub repository file and a website page dedicated to this weakness. 
It is important to adhere to the provided MAS guidelines while developing the content.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2672", @@ -30071,8 +31110,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30085,6 +31124,7 @@ "node_id": "I_kwDOBCbAAs6PEve3", "title": "[MASWE-0034] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0034\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0034.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0034/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0034,\" needs to be created in the specified GitHub repository and linked to its corresponding website page. The guidelines outlined in the provided document must be followed for this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2673", @@ -30100,8 +31140,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30114,6 +31154,7 @@ "node_id": "I_kwDOBCbAAs6PEvfb", "title": "[MASWE-0035] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0035\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0035.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0035/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0035,\" needs to be created in the specified GitHub repository. The details for this new weakness can be found in the provided links. It is important to adhere to the outlined MAS guidelines when completing this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2674", @@ -30129,8 +31170,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30143,6 +31184,7 @@ "node_id": "I_kwDOBCbAAs6PEvgG", "title": "[MASWE-0036] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0036\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0036.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0036/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness, MASWE-0036, with specific references to its documentation in the GitHub repository and on the OWASP website. The task requires adherence to the provided MAS guidelines for its development. 
It would be beneficial to review these guidelines while preparing the new weakness documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2675", @@ -30158,8 +31200,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30172,6 +31214,7 @@ "node_id": "I_kwDOBCbAAs6PEvgy", "title": "[MASWE-0037] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0037\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0037.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0037/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness labeled \"MASWE-0037\" needs to be created. The relevant details can be found in the specified GitHub repository file and the website page. To proceed, ensure adherence to the provided MAS guidelines for the documentation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2676", @@ -30187,8 +31230,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30201,6 +31244,7 @@ "node_id": "I_kwDOBCbAAs6PEvha", "title": "[MASWE-0038] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0038\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0038.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0038/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0038,\" needs to be created, referencing specific files in the GitHub repository and the corresponding web page. The task involves adhering to the provided MAS guidelines for documentation. It is essential to review the guidelines to ensure proper formatting and content structure when creating the new weakness entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2677", @@ -30216,8 +31260,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30230,6 +31274,7 @@ "node_id": "I_kwDOBCbAAs6PEviG", "title": "[MASWE-0039] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0039\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0039.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0039/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0039,\" needs to be created following the specified guidelines. This involves updating the GitHub repository to include the new weakness details as outlined in the provided links. 
It is essential to ensure adherence to the MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2678", @@ -30245,8 +31290,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30259,6 +31304,7 @@ "node_id": "I_kwDOBCbAAs6PEviq", "title": "[MASWE-0040] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0040\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0040.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0040/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness entry for \"MASWE-0040.\" The details for the weakness can be found in a specified GitHub repository file and a dedicated website page. It is essential to adhere to the provided MAS guidelines while developing this entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2679", @@ -30274,8 +31320,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30288,6 +31334,7 @@ "node_id": "I_kwDOBCbAAs6PEvjb", "title": "[MASWE-0041] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0041\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0041.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0041/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness, \"MASWE-0041,\" with specific references to both the GitHub repository and the corresponding website page. The task involves following the provided MAS guidelines for the development of this weakness. Ensure to adhere closely to the guidelines when completing this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2680", @@ -30303,8 +31350,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30317,6 +31364,7 @@ "node_id": "I_kwDOBCbAAs6PEvkG", "title": "[MASWE-0042] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0042\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0042.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0042/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness labeled \"MASWE-0042.\" The details for this weakness can be found in the specified GitHub repository and on the associated website page. 
It is important to adhere to the provided MAS guidelines when developing this new entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2681", @@ -30332,8 +31380,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30346,6 +31394,7 @@ "node_id": "I_kwDOBCbAAs6PEvkt", "title": "[MASWE-0043] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0043\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0043.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0043/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness entry for \"MASWE-0043\" in the MASVS-AUTH category. The relevant GitHub repository and website page links have been provided. The task should adhere to the specified MAS guidelines for consistency and accuracy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2682", @@ -30363,8 +31412,8 @@ 477 ], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30377,6 +31426,7 @@ "node_id": "I_kwDOBCbAAs6PEvla", "title": "[MASWE-0044] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0044\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0044.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0044/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0044,\" needs to be created. Relevant documentation can be found in the GitHub repository and on the designated website page. It is essential to adhere to the provided MAS guidelines while developing this new entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2683", @@ -30392,8 +31442,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30406,6 +31456,7 @@ "node_id": "I_kwDOBCbAAs6PEvmE", "title": "[MASWE-0045] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0045\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0045.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0045/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness labeled \"MASWE-0045\" needs to be created and documented. The relevant GitHub repository file and website page have been provided for reference. 
It is essential to adhere to the specified MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2684", @@ -30421,8 +31472,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30435,6 +31486,7 @@ "node_id": "I_kwDOBCbAAs6PEvmw", "title": "[MASWE-0046] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0046\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-AUTH/MASWE-0046.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-AUTH/MASWE-0046/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0046,\" needs to be created for the OWASP Mobile Application Security Verification Standard. Relevant documentation can be found in the GitHub repository and on the designated website page. To proceed, ensure adherence to the provided MAS guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2685", @@ -30450,8 +31502,8 @@ "repository": 1127, "assignees": [], "labels": [ - 207, - 222 + 222, + 207 ] } }, @@ -30464,6 +31516,7 @@ "node_id": "I_kwDOBCbAAs6PEvni", "title": "[MASWE-0047] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0047\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-NETWORK/MASWE-0047.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-NETWORK/MASWE-0047/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0047,\" needs to be created for the repository. The details for this weakness can be found in the provided GitHub repository file and the corresponding website page. It's essential to adhere to the MAS guidelines linked in the issue. The task involves drafting and detailing this new weakness accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2686", @@ -30479,8 +31532,8 @@ "repository": 1127, "assignees": [], "labels": [ - 215, - 222 + 222, + 215 ] } }, @@ -30493,6 +31546,7 @@ "node_id": "I_kwDOBCbAAs6PE4JK", "title": "[MASWE-0048] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0048\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-NETWORK/MASWE-0048.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-NETWORK/MASWE-0048/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0048,\" needs to be created. The relevant documentation is available in the specified GitHub repository and on the associated website page. 
It is important to adhere to the provided MAS guidelines when developing this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2688", @@ -30508,8 +31562,8 @@ "repository": 1127, "assignees": [], "labels": [ - 215, - 222 + 222, + 215 ] } }, @@ -30522,6 +31576,7 @@ "node_id": "I_kwDOBCbAAs6PE4Jz", "title": "[MASWE-0049] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0049\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-NETWORK/MASWE-0049.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-NETWORK/MASWE-0049/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "The issue requests the creation of a new weakness titled \"MASWE-0049\" as part of the MASVS-NETWORK framework. It includes links to the relevant GitHub repository file and the corresponding website page. Additionally, it emphasizes the need to adhere to the provided MAS guidelines. To move forward, the necessary documentation and details for this new weakness should be developed in accordance with the established guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2689", @@ -30537,8 +31592,8 @@ "repository": 1127, "assignees": [], "labels": [ - 215, - 222 + 222, + 215 ] } }, @@ -30551,6 +31606,7 @@ "node_id": "I_kwDOBCbAAs6PE4KT", "title": "[MASWE-0050] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0050\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-NETWORK/MASWE-0050.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-NETWORK/MASWE-0050/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0050,\" needs to be created and documented in the specified GitHub repository and on the associated website page. To proceed, ensure adherence to the provided MAS guidelines for proper formatting and content inclusion.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2690", @@ -30566,8 +31622,8 @@ "repository": 1127, "assignees": [], "labels": [ - 215, - 222 + 222, + 215 ] } }, @@ -30580,6 +31636,7 @@ "node_id": "I_kwDOBCbAAs6PE4K0", "title": "[MASWE-0051] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0051\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-NETWORK/MASWE-0051.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-NETWORK/MASWE-0051/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0051,\" needs to be created as outlined in the provided GitHub repository link and the corresponding webpage. 
It is important to adhere to the specified MAS guidelines during this process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2691", @@ -30595,8 +31652,8 @@ "repository": 1127, "assignees": [], "labels": [ - 215, - 222 + 222, + 215 ] } }, @@ -30609,6 +31666,7 @@ "node_id": "I_kwDOBCbAAs6PE4LW", "title": "[MASWE-0052] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0052\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-NETWORK/MASWE-0052.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-NETWORK/MASWE-0052/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness labeled \"MASWE-0052.\" Relevant information can be found in the specified GitHub repository file and on the corresponding website page. It is important to adhere to the provided MAS guidelines while developing this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2692", @@ -30624,8 +31682,8 @@ "repository": 1127, "assignees": [], "labels": [ - 215, - 222 + 222, + 215 ] } }, @@ -30638,6 +31696,7 @@ "node_id": "I_kwDOBCbAAs6PE48B", "title": "[MASWE-0053] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0053\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0053.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0053/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0053,\" needs to be created and documented. The relevant files can be found in the specified GitHub repository and on the associated website page. Adherence to the provided MAS guidelines is essential for this task. It is important to ensure that the new weakness is accurately defined and aligned with existing documentation standards.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2693", @@ -30667,6 +31726,7 @@ "node_id": "I_kwDOBCbAAs6PE48a", "title": "[MASWE-0054] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0054\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0054.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0054/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0054,\" needs to be created. Relevant documentation can be found in the specified GitHub repository and on the associated website page. 
It is essential to adhere to the provided MAS guidelines during this process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2694", @@ -30696,6 +31756,7 @@ "node_id": "I_kwDOBCbAAs6PE49N", "title": "[MASWE-0055] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0055\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0055.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0055/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0055,\" needs to be created according to the specified guidelines. The relevant files are located in the provided GitHub repository and website page links. Ensure adherence to the MAS guidelines while developing this entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2695", @@ -30725,6 +31786,7 @@ "node_id": "I_kwDOBCbAAs6PE49w", "title": "[MASWE-0056] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0056\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0056.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0056/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness entry for \"MASWE-0056.\" The relevant details can be found in a specified GitHub repository file and a dedicated website page. It is important to adhere to the provided MAS guidelines while completing this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2696", @@ -30754,6 +31816,7 @@ "node_id": "I_kwDOBCbAAs6PE4-X", "title": "[MASWE-0057] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0057\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0057.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0057/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0057,\" needs to be created and documented. Reference the specific GitHub repository file and the website page provided for detailed information. Ensure adherence to the MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2697", @@ -30783,6 +31846,7 @@ "node_id": "I_kwDOBCbAAs6PE4-7", "title": "[MASWE-0058] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0058\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0058.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0058/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0058,\" has been proposed for inclusion in the MASVS-PLATFORM. Relevant resources include the GitHub repository file and the dedicated website page. The issue emphasizes the need to adhere to the specified MAS guidelines during the creation process. 
It is essential to review these guidelines to ensure compliance and completeness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2698", @@ -30812,6 +31876,7 @@ "node_id": "I_kwDOBCbAAs6PE4_f", "title": "[MASWE-0059] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0059\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0059.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0059/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness titled \"MASWE-0059\" needs to be created. The details can be found in the specified GitHub repository and website page. It is important to adhere to the provided MAS guidelines while developing this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2699", @@ -30841,6 +31906,7 @@ "node_id": "I_kwDOBCbAAs6PE5AN", "title": "[MASWE-0060] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0060\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0060.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0060/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0060,\" needs to be created for the OWASP MASVS platform. Relevant details can be found in the provided GitHub repository file and the associated website page. It is important to follow the specified MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2700", @@ -30870,6 +31936,7 @@ "node_id": "I_kwDOBCbAAs6PE5Aq", "title": "[MASWE-0061] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0061\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0061.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0061/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness titled \"MASWE-0061\" needs to be created. Relevant details can be found in the specified GitHub repository and on the corresponding website page. It is important to adhere to the provided MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2701", @@ -30899,6 +31966,7 @@ "node_id": "I_kwDOBCbAAs6PE5BU", "title": "[MASWE-0062] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0062\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0062.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0062/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0062,\" needs to be created. The relevant details can be found in the specified GitHub repository file and on the associated website page. 
It is essential to adhere to the provided MAS guidelines when developing this new entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2702", @@ -30928,6 +31996,7 @@ "node_id": "I_kwDOBCbAAs6PE5CI", "title": "[MASWE-0063] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0063\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0063.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0063/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0063,\" needs to be created. The relevant GitHub repository file and website page have been provided as references. It is important to adhere to the specified MAS guidelines while developing this new entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2703", @@ -30957,6 +32026,7 @@ "node_id": "I_kwDOBCbAAs6PE5Cx", "title": "[MASWE-0064] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0064\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0064.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0064/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0064,\" needs to be created. The relevant GitHub repository file and website page have been provided. To proceed, one should follow the specified MAS guidelines for proper documentation and formatting of the weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2704", @@ -30986,6 +32056,7 @@ "node_id": "I_kwDOBCbAAs6PE5De", "title": "[MASWE-0065] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0065\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0065.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0065/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0065,\" needs to be created and documented. The relevant GitHub repository file and website page have been provided for reference. It is essential to adhere to the MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2705", @@ -31015,6 +32086,7 @@ "node_id": "I_kwDOBCbAAs6PE5EF", "title": "[MASWE-0066] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0066\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0066.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0066/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness entry for \"MASWE-0066\" in the OWASP MASTG repository. The relevant files and website pages have been provided for reference. 
It is important to adhere to the specified MAS guidelines while completing this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2706", @@ -31044,6 +32116,7 @@ "node_id": "I_kwDOBCbAAs6PE5Et", "title": "[MASWE-0067] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0067\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0067.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0067/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness labeled \"MASWE-0067.\" The necessary details can be found in a specified GitHub repository file and a dedicated webpage. It is important to adhere to the provided MAS guidelines while completing this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2707", @@ -31073,6 +32146,7 @@ "node_id": "I_kwDOBCbAAs6PE5Fa", "title": "[MASWE-0068] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0068\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0068.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0068/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0068,\" needs to be created and documented. The relevant GitHub repository file and website page are provided for reference. It's important to adhere to the specified MAS guidelines during this process. Make sure to review the guidelines for proper documentation and formatting.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2708", @@ -31102,6 +32176,7 @@ "node_id": "I_kwDOBCbAAs6PE5GC", "title": "[MASWE-0069] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0069\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0069.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0069/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness identified as \"MASWE-0069\" needs to be created. The relevant documentation can be found in the specified GitHub repository file and on the associated website page. It's important to adhere to the provided MAS guidelines while developing this new weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2709", @@ -31131,6 +32206,7 @@ "node_id": "I_kwDOBCbAAs6PE5Gs", "title": "[MASWE-0070] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0070\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0070.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0070/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0070,\" needs to be created. The relevant GitHub repository and website page for this weakness have been provided. 
It is important to follow the specified MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2710", @@ -31160,6 +32236,7 @@ "node_id": "I_kwDOBCbAAs6PE5HR", "title": "[MASWE-0071] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0071\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0071.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0071/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0071,\" needs to be created. The relevant details can be found in the provided GitHub repository file and the corresponding website page. It is important to adhere to the specified MAS guidelines when developing this new entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2711", @@ -31189,6 +32266,7 @@ "node_id": "I_kwDOBCbAAs6PE5Hy", "title": "[MASWE-0072] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0072\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0072.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0072/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0072,\" needs to be created based on the provided GitHub repository file and website page. The task involves following the specified MAS guidelines to ensure proper documentation and alignment with existing standards. It is essential to review the guidelines thoroughly before proceeding with the creation of the weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2712", @@ -31218,6 +32296,7 @@ "node_id": "I_kwDOBCbAAs6PE-GP", "title": "[MASWE-0073] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0073\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0073.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0073/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness titled \"MASWE-0073.\" The specific details can be found in the provided GitHub repository file and the corresponding website page. It is essential to adhere to the outlined MAS guidelines while developing this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2743", @@ -31247,6 +32326,7 @@ "node_id": "I_kwDOBCbAAs6PE-G1", "title": "[MASWE-0074] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0074\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-PLATFORM/MASWE-0074.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-PLATFORM/MASWE-0074/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness needs to be created for \"MASWE-0074.\" The relevant details can be found in the specified GitHub repository file and the website page. 
It is important to follow the provided MAS guidelines when developing this new weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2744", @@ -31276,6 +32356,7 @@ "node_id": "I_kwDOBCbAAs6PE-lj", "title": "[MASWE-0075] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0075\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0075.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0075/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0075,\" needs to be created in the specified GitHub repository and website page. The task includes adhering to the provided MAS guidelines. Ensure to review the guidelines for proper formatting and content requirements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2745", @@ -31305,6 +32386,7 @@ "node_id": "I_kwDOBCbAAs6PE-mP", "title": "[MASWE-0076] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0076\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0076.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0076/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0076,\" needs to be created and documented. The relevant GitHub repository file and the corresponding website page have been provided. It is essential to adhere to the specified MAS guidelines when developing this new entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2746", @@ -31334,6 +32416,7 @@ "node_id": "I_kwDOBCbAAs6PE-nG", "title": "[MASWE-0077] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0077\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0077.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0077/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0077,\" needs to be created. Relevant GitHub repository and website pages have been provided for reference. It is important to follow the specified MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2747", @@ -31365,6 +32448,7 @@ "node_id": "I_kwDOBCbAAs6PE-n0", "title": "[MASWE-0078] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0078\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0078.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0078/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0078,\" needs to be created. The relevant GitHub repository file and website page have been provided for reference. 
It is important to follow the specified MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2748", @@ -31394,6 +32478,7 @@ "node_id": "I_kwDOBCbAAs6PE-ou", "title": "[MASWE-0079] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0079\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0079.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0079/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0079,\" needs to be created and documented. The relevant GitHub repository file and website page links have been provided for reference. It is important to adhere to the specified MAS guidelines when completing this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2749", @@ -31423,6 +32508,7 @@ "node_id": "I_kwDOBCbAAs6PE-pr", "title": "[MASWE-0080] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0080\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0080.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0080/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness entry for \"MASWE-0080\" within the OWASP MASVS framework. The details for the GitHub repository and the corresponding website page have been provided. To proceed, the author should adhere to the specified MAS guidelines for documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2750", @@ -31452,6 +32538,7 @@ "node_id": "I_kwDOBCbAAs6PE-qU", "title": "[MASWE-0081] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0081\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0081.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0081/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness titled \"MASWE-0081\" needs to be created. The relevant details are provided in the linked GitHub repository file and the designated website page. It is important to follow the specified MAS guidelines for the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2751", @@ -31481,6 +32568,7 @@ "node_id": "I_kwDOBCbAAs6PE-qo", "title": "[MASWE-0082] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0082\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0082.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0082/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness identified as \"MASWE-0082\" needs to be created. The relevant information can be found in the specified GitHub repository and associated website page. 
To proceed, ensure adherence to the provided MAS guidelines for documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2752", @@ -31510,6 +32598,7 @@ "node_id": "I_kwDOBCbAAs6PE-rf", "title": "[MASWE-0083] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0083\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0083.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0083/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness entry for \"MASWE-0083\" within the OWASP MASTG repository. The details for the weakness are provided in both a GitHub repository file and a dedicated website page. It is essential to follow the specified MAS guidelines for this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2753", @@ -31539,6 +32628,7 @@ "node_id": "I_kwDOBCbAAs6PE-sE", "title": "[MASWE-0084] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0084\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0084.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0084/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness titled \"MASWE-0084\" needs to be created, with the relevant details outlined in the specified GitHub repository and website page. To proceed, follow the provided MAS guidelines for structuring and documenting the weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2754", @@ -31568,6 +32658,7 @@ "node_id": "I_kwDOBCbAAs6PE-tC", "title": "[MASWE-0085] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0085\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0085.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0085/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0085,\" needs to be created and documented in the specified GitHub repository. The relevant file is located at the provided GitHub link, and additional details can be found on the associated website page. To proceed, ensure that the creation follows the established MAS guidelines linked in the issue.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2755", @@ -31597,6 +32688,7 @@ "node_id": "I_kwDOBCbAAs6PE-t5", "title": "[MASWE-0086] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0086\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0086.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0086/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0086,\" needs to be created and documented. The relevant information can be found in the specified GitHub repository and on the associated website page. 
It is essential to adhere to the provided MAS guidelines during the documentation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2756", @@ -31626,6 +32718,7 @@ "node_id": "I_kwDOBCbAAs6PE-uq", "title": "[MASWE-0087] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0087\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0087.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0087/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0087,\" needs to be created in the specified GitHub repository. The relevant documentation is available in the provided GitHub file and on the MASVS website page. It is important to adhere to the MAS guidelines while developing this weakness. Ensure to review the guidelines thoroughly to proceed effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2757", @@ -31655,6 +32748,7 @@ "node_id": "I_kwDOBCbAAs6PE-vU", "title": "[MASWE-0088] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0088\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-CODE/MASWE-0088.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-CODE/MASWE-0088/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0088,\" needs to be created. The relevant GitHub repository file and website page have been provided for reference. It is important to adhere to the specified MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2758", @@ -31684,6 +32778,7 @@ "node_id": "I_kwDOBCbAAs6PE_RR", "title": "[MASWE-0089] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0089\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0089.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0089/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0089,\" needs to be created. Reference the specified GitHub repository file and the corresponding website page for details. Ensure to adhere to the provided MAS guidelines when developing this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2759", @@ -31713,6 +32808,7 @@ "node_id": "I_kwDOBCbAAs6PE_SM", "title": "[MASWE-0090] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0090\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0090.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0090/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness \"MASWE-0090\" needs to be created. The relevant GitHub repository file and website page have been provided for reference. 
It is essential to follow the specified MAS guidelines while developing this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2760", @@ -31742,6 +32838,7 @@ "node_id": "I_kwDOBCbAAs6PE_S8", "title": "[MASWE-0091] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0091\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0091.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0091/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0091,\" needs to be created. The relevant details can be found in the specified GitHub repository file and the linked website page. It is important to follow the provided MAS guidelines while developing this new weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2761", @@ -31771,6 +32868,7 @@ "node_id": "I_kwDOBCbAAs6PE_Tg", "title": "[MASWE-0092] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0092\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0092.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0092/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness entry for \"MASWE-0092\" in the OWASP MASTG repository. The relevant files and guidelines for this task have been provided, including links to the GitHub repository and the official website page. To proceed, one should follow the specified MAS guidelines to ensure consistency and accuracy in the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2762", @@ -31800,6 +32898,7 @@ "node_id": "I_kwDOBCbAAs6PE_UT", "title": "[MASWE-0093] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0093\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0093.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0093/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0093,\" needs to be created, with specific details provided in the linked GitHub repository and website page. It is essential to follow the established MAS guidelines for this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2763", @@ -31829,6 +32928,7 @@ "node_id": "I_kwDOBCbAAs6PE_VF", "title": "[MASWE-0094] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0094\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0094.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0094/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0094,\" needs to be created. The associated GitHub repository file and website page have been provided for reference. Additionally, it's essential to follow the specified MAS guidelines for this task. 
Ensure that the documentation is aligned with the established standards.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2764", @@ -31858,6 +32958,7 @@ "node_id": "I_kwDOBCbAAs6PE_V1", "title": "[MASWE-0095] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0095\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0095.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0095/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0095,\" needs to be created following the established guidelines. The relevant files and resources for this task include a GitHub repository file and a website page dedicated to this weakness. It's essential to adhere to the MAS guidelines provided in the linked document when drafting this new entry.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2765", @@ -31887,6 +32988,7 @@ "node_id": "I_kwDOBCbAAs6PE_Wc", "title": "[MASWE-0096] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0096\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0096.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0096/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0096,\" needs to be created. The relevant documentation can be found in the specified GitHub repository file and on the website page. It is important to adhere to the provided MAS guidelines while developing this new weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2766", @@ -31916,6 +33018,7 @@ "node_id": "I_kwDOBCbAAs6PE_XV", "title": "[MASWE-0097] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0097\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0097.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0097/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, identified as \"MASWE-0097,\" needs to be created. Relevant documentation is available in both the GitHub repository and on the website page linked. It is essential to adhere to the provided MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2767", @@ -31945,6 +33048,7 @@ "node_id": "I_kwDOBCbAAs6PE_YF", "title": "[MASWE-0098] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0098\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0098.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0098/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0098,\" needs to be created and documented. The relevant GitHub repository file and website page for this weakness have been provided. 
It is important to follow the specified MAS guidelines when creating this documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2768", @@ -31976,6 +33080,7 @@ "node_id": "I_kwDOBCbAAs6PE_Yt", "title": "[MASWE-0099] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0099\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0099.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0099/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0099,\" needs to be created in the MASWE documentation. The relevant GitHub repository file and the corresponding website page have been provided. It's essential to adhere to the specified MAS guidelines while developing this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2769", @@ -32005,6 +33110,7 @@ "node_id": "I_kwDOBCbAAs6PE_Zb", "title": "[MASWE-0100] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0100\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0100.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0100/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness identified as \"MASWE-0100\" needs to be created. The relevant documentation is available in the GitHub repository and on the associated website page. It is essential to follow the provided MAS guidelines when developing this new weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2770", @@ -32034,6 +33140,7 @@ "node_id": "I_kwDOBCbAAs6PE_aG", "title": "[MASWE-0101] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0101\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0101.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0101/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness labeled \"MASWE-0101\" is proposed for inclusion in the project. Relevant files can be found in the provided GitHub repository and the corresponding website page. The task requires adherence to the specified MAS guidelines for proper documentation and structure. To proceed, ensure that all necessary details are captured according to the guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2771", @@ -32063,6 +33170,7 @@ "node_id": "I_kwDOBCbAAs6PE_a6", "title": "[MASWE-0102] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0102\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0102.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0102/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness titled \"MASWE-0102\" needs to be created. The details for this weakness can be found in the specified GitHub repository file and on the corresponding website page. 
It is important to adhere to the provided MAS guidelines when developing this new weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2772", @@ -32092,6 +33200,7 @@ "node_id": "I_kwDOBCbAAs6PE_be", "title": "[MASWE-0103] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0103\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0103.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0103/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A request has been made to create a new weakness titled \"MASWE-0103.\" The relevant documentation can be found in the specified GitHub repository and on the associated website page. To proceed, it is essential to follow the provided MAS guidelines for creating this weakness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2773", @@ -32121,6 +33230,7 @@ "node_id": "I_kwDOBCbAAs6PE_cO", "title": "[MASWE-0104] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0104\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0104.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0104/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0104,\" needs to be created. Relevant documentation includes a specific file in the GitHub repository and a webpage dedicated to this weakness. It is essential to adhere to the provided MAS guidelines for the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2774", @@ -32150,6 +33260,7 @@ "node_id": "I_kwDOBCbAAs6PE_c9", "title": "[MASWE-0105] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0105\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0105.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0105/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness, \"MASWE-0105,\" needs to be created. Relevant resources include the specific GitHub repository file and the corresponding website page. It is essential to adhere to the provided MAS guidelines during the creation process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2775", @@ -32179,6 +33290,7 @@ "node_id": "I_kwDOBCbAAs6PE_dm", "title": "[MASWE-0106] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0106\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0106.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0106/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness labeled \"MASWE-0106\" needs to be created. Relevant documentation is available in the specified GitHub repository and on the associated website page. 
To proceed, ensure to adhere to the provided MAS guidelines for consistency and accuracy in the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2776", @@ -32208,6 +33320,7 @@ "node_id": "I_kwDOBCbAAs6PE_eo", "title": "[MASWE-0107] New MASWE Weakness", "body": "\nCreate a new weakness for \"MASWE-0107\":\n\n- **GitHub Repo file:** https://github.com/OWASP/owasp-mastg/blob/master/weaknesses/MASVS-RESILIENCE/MASWE-0107.md\n- **Website Page:** https://mas.owasp.org/MASWE/MASVS-RESILIENCE/MASWE-0107/\n\n**Follow the [MAS guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**\n", + "summary": "A new weakness titled \"MASWE-0107\" needs to be created. The details for this weakness can be found in the provided GitHub repository file and on the associated website page. It is important to follow the specified MAS guidelines when developing this content.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2777", @@ -32237,6 +33350,7 @@ "node_id": "I_kwDOBCbAAs6PaTlG", "title": "[Tool] Add HTTP Toolkit to Network Tools", "body": "Evaluate and potentially incorporate HTTP Toolkit\r\n\r\nhttps://github.com/httptoolkit\r\n\r\n\r\n### Discussed in https://github.com/OWASP/owasp-mastg/discussions/2665\r\n\r\n
\r\n\r\nOriginally posted by **saulpanders** July 9, 2024\r\n[HTTP Toolkit](https://httptoolkit.com/android/)\r\n\r\nIts another alternative to Burp proxy, it can intercept HTTP traffic for android devices. It also has some automated abilities to get around certain certificate pinning issues by automatically injecting a system CA on install.\r\n\r\nI dont have any affiliation with the tool, but used it recently and found it interesting
", + "summary": "The issue proposes the evaluation and potential incorporation of HTTP Toolkit into the Network Tools category. HTTP Toolkit is highlighted as an alternative to Burp proxy, specifically for intercepting HTTP traffic on Android devices, with features that help bypass certificate pinning issues. The discussion suggests that further examination of its capabilities and integration into the existing toolset may be beneficial. Consider conducting a thorough evaluation of HTTP Toolkit's functionalities and compatibility with current tools.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2793", @@ -32268,6 +33382,7 @@ "node_id": "I_kwDOBCbAAs6PgN-u", "title": "[TOOL] APKEnum", "body": "https://github.com/shivsahni/APKEnum\r\n\r\nIt's mentioned a few times in the MASTG, though it's python2.7 and doesn't look actively maintained. Have never used it myself, maybe just remove from mastg?", + "summary": "The issue raises concerns about the APKEnum tool, noting that it is based on Python 2.7 and appears to be poorly maintained. Since it is referenced multiple times in the MASTG, the suggestion is to consider removing it from the guidelines. A potential next step would be to evaluate the current relevance and functionality of APKEnum before making a decision.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2818", @@ -32296,6 +33411,7 @@ "node_id": "I_kwDOBCbAAs6PgOGd", "title": "[TOOL] patch-apk", "body": "Original: https://github.com/NickstaDB/patch-apk\r\nActive: https://github.com/TheDauntless/patch-apk\r\n\r\nGreat tool for pulling APKs from a device/emulator, especially since it can merge different split-apks. The first repo is the original, while the second one is mine which has a bunch of the PRs included with additional features", + "summary": "The issue discusses the \"patch-apk\" tool, highlighting its functionality for extracting APKs from devices or emulators and its ability to merge different split-apks. It notes the existence of two repositories: the original and a fork that includes additional features and merged pull requests. To enhance the tool, it may be helpful to review and integrate more community contributions or improve existing features.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2819", @@ -32324,6 +33440,7 @@ "node_id": "I_kwDOBCbAAs6PgOqW", "title": "[TOOL] Android-CertKiller", "body": "https://github.com/51j0/Android-CertKiller\r\n\r\nMentioned in MASTG, but suggest replacing it with patch-apk as it can also add networksecurity config.", + "summary": "A suggestion has been made to replace Android-CertKiller with patch-apk, as the latter can also provide network security configuration capabilities. It is recommended to evaluate the features of both tools and consider implementing the suggested change.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2820", @@ -32352,6 +33469,7 @@ "node_id": "I_kwDOBCbAAs6PgPIQ", "title": "[TOOL] AndroBugs_Framework", "body": "https://github.com/AndroBugs/AndroBugs_Framework\r\n\r\nMentioned in MASVS but project is archived. Suggest switching it out with QUARK https://github.com/quark-engine/quark-engine", + "summary": "The issue discusses the AndroBugs_Framework, which has been mentioned in the Mobile Application Security Verification Standard (MASVS) but is currently archived. The suggestion is to replace it with the QUARK engine instead. 
Moving forward, it would be beneficial to evaluate and possibly update the references in MASVS to include QUARK as a suitable alternative.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2821", @@ -32381,6 +33499,7 @@ "node_id": "I_kwDOBCbAAs6PgPLg", "title": "[TOOL] Quark", "body": "https://github.com/quark-engine/quark-engine\r\n\r\nContains a bunch of rules for both static security analysis and static malware analysis", + "summary": "The issue discusses the Quark tool, which includes various rules designed for both static security analysis and static malware analysis. It suggests that further development or enhancement of these rules may be necessary. A review or update of the existing rules could improve the tool's effectiveness.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2822", @@ -32410,6 +33529,7 @@ "node_id": "I_kwDOBCbAAs6PgRTW", "title": "[TOOL] General cleanup", "body": "The following links should probably be deleted and/or replaced with alternatives:\r\n\r\nhttps://github.com/DanTheMan827/ios-app-signer\r\nhttps://github.com/DerekSelander/dsdump\r\nhttps://github.com/Domilopment/apk-extractor\r\nhttps://github.com/JohnSundell/Unbox\r\nhttps://github.com/Magisk-Modules-Repo/submission\r\nhttps://github.com/ViRb3/TrustMeAlready\r\nhttps://github.com/aerogear/aerogear-ios-crypto\r\nhttps://github.com/dinosec/iphone-dataprotection\r\nhttps://github.com/dogriffiths/HeadFirstAndroid\r\nhttps://github.com/fanxs-t/Android-SSL_read-write-Hook\r\nhttps://github.com/flankerhqd/JAADAS\r\nhttps://github.com/hkellaway/Gloss\r\nhttps://github.com/iSECPartners/Android-SSL-TrustKiller\r\nhttps://github.com/linkedin/qark\r\nhttps://github.com/limneos/classdump-dyld\r\nhttps://github.com/jrmdev/mitm_relay\r\nhttps://github.com/mseclab/nathan", + "summary": "The issue outlines a request for general cleanup by suggesting the deletion or replacement of several outdated or less relevant links from the repository. It lists a series of GitHub repositories that may no longer be necessary. To address this, a review of the linked repositories should be conducted to determine their current relevance and decide on appropriate actions, such as removal or substitution with more current resources.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2825", @@ -32440,6 +33560,7 @@ "node_id": "I_kwDOBCbAAs6Pi5bZ", "title": "[TOOL] Checksec", "body": "Checksec is a bash shell script that is used to check the properties of executables (like PIE, RELRO, Canaries, ASLR, Fortify Source).\r\n\r\nhttps://slimm609.github.io/checksec.sh/\r\nhttps://github.com/slimm609/checksec.sh", + "summary": "The issue discusses the Checksec tool, a bash shell script designed to assess various security properties of executables, including Position Independent Executables (PIE), Read-Only Relocation (RELRO), stack canaries, Address Space Layout Randomization (ASLR), and Fortify Source. 
It suggests that users may need to review the documentation for effective usage and possibly contribute to enhancements or bug fixes for the script.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2826", @@ -32468,6 +33589,7 @@ "node_id": "I_kwDOBCbAAs6QCFHa", "title": "💲🎉 New Donation", "body": "### 🙂 Your Name\n\nRégis Senet\n\n### 🏢 Company Name\n\nORHUS\n\n### 📧 E-Mail Address\n\nregis.senet@orhus.fr\n\n### 💵 Donation Amount\n\n500\n\n### 📦 Donation Package\n\nGood Samaritan (USD 500; 1 year; 1 paperback MASTG)\n\n### 📘 MASTG Paperback Copies\n\n📮 Yes, please send them to me.\n\n### 📋 Code of Conduct and Donation Conditions\n\n- [X] I agree to follow this project's Code of Conduct and Donation conditions.", + "summary": "A new donation of $500 has been made by an individual representing ORHUS. The donor has selected the \"Good Samaritan\" package, which includes a paperback copy of the MASTG. The donor has also agreed to adhere to the project's Code of Conduct and donation conditions. \n\nIt may be necessary to process the donation and arrange for the delivery of the paperback copy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2832", @@ -32482,8 +33604,8 @@ "author": 513, "repository": 1127, "assignees": [ - 469, - 474 + 474, + 469 ], "labels": [ 210, @@ -32500,6 +33622,7 @@ "node_id": "I_kwDOBCbAAs6Q8UTs", "title": "Update MAS Adoption section for ADA", "body": "Update https://github.com/OWASP/owasp-mastg/blob/master/Document/0x02b-MASVS-MASTG-Adoption.md#google-android \r\n\r\nwith new info from https://security.googleblog.com/2023/11/evolving-app-defense-alliance.html \r\nand https://github.com/appdefensealliance/ASA-WG/tree/main/Mobile%20App%20Profile ", + "summary": "The issue requests an update to the MAS Adoption section for ADA in the specified document. The updates should incorporate new information from the provided Google security blog post and the App Defense Alliance resources. It is necessary to review these sources and integrate relevant content to ensure the section reflects the latest developments.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2836", @@ -32528,6 +33651,7 @@ "node_id": "I_kwDOBCbAAs6Q_9Ah", "title": "[Tool] Add Binwalk ", "body": "Add MASTG-TOOL-XXXX.md to tools/ and update reference in /tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#extracting-the-entitlements-plist-from-the-app-binary\n\nNewer fork: https://github.com/OSPG/binwalk\n\nOriginal project: https://github.com/ReFirmLabs/binwalk", + "summary": "The issue suggests adding a new documentation file, MASTG-TOOL-XXXX.md, to the tools directory and updating the reference in the specified test case related to extracting the entitlements plist from the app binary. It mentions a newer fork of the Binwalk tool and provides links to both the newer version and the original project. 
To address this issue, the necessary documentation and references should be created or updated accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2838", @@ -32556,6 +33680,7 @@ "node_id": "I_kwDOBCbAAs6RABqn", "title": "Add missing reference to bytecode viewer and add missing tool files ", "body": "Add missing reference to bytecode viewer (MASTG-TOOL-0014) to `/techniques/android/MASTG-TECH-0017.md`\n\n\nAdd new MASTG-TOOL-xxxx files to tools/ for any tools mentioned in `/techniques/android/MASTG-TECH-0017.md` that have no dedicated file in `tools/` Update the references in MASTG-TECH-0017.md, and potentially other files, accordingly.\n\n\nTools markdown files must always have a yaml frontmatter:\n\n```\n---\ntitle: Apktool\nplatform: android\nsource: https://github.com/iBotPeaches/Apktool\n---\n\n...\n```\n", + "summary": "The issue requests the addition of a reference to the bytecode viewer in the document located at `/techniques/android/MASTG-TECH-0017.md` (MASTG-TOOL-0014). Additionally, it calls for the creation of new markdown files for tools mentioned in the same document that currently lack dedicated entries in the `tools/` directory. It emphasizes that these markdown files must include a YAML frontmatter. The necessary updates to the references in MASTG-TECH-0017.md and potentially other files should also be made. \n\nTo address this issue, ensure that the bytecode viewer reference is added and create the required tool files in the specified format.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2839", @@ -32584,6 +33709,7 @@ "node_id": "I_kwDOBCbAAs6RCvzD", "title": "New Dart calling convention ", "body": "Update the relevant tools (e.g. blutter, reflutter), techniques and chapters if necessary.\n\nSee\n\n- https://cryptax.medium.com/dart-shifts-to-standard-calling-convention-26dc65f8d15a\n- https://github.com/cryptax/talks/blob/master/ref/flutter-ref.pdf\n\n", + "summary": "The issue discusses the need to update various tools and documentation related to Dart's new calling convention. Specifically, updates are required for tools such as blutter and reflutter, as well as relevant techniques and chapters. It references external resources for further information on the changes. To address this issue, the necessary updates to tools and documentation should be implemented to align with the new calling convention.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2841", @@ -32610,6 +33736,7 @@ "node_id": "I_kwDOBCbAAs6RGgzU", "title": "[TOOL] NoPE Proxy ", "body": "NoPE Proxy serves as a Burp Suite Extension designed for proxying Non-HTTP Traffic.\r\n\r\nLink: https://github.com/summitt/Nope-Proxy", + "summary": "The GitHub issue discusses the NoPE Proxy, a Burp Suite Extension intended for proxying non-HTTP traffic. It appears there are requests for enhancements or bug fixes related to its functionality. 
To address the issue, a review of the existing code and user feedback may be necessary to implement the required improvements.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2844", @@ -32638,6 +33765,7 @@ "node_id": "I_kwDOBCbAAs6RtCCj", "title": "Add Additional CWE mappings to MASWE", "body": "MASWE supports CWE mappings already:\r\n\r\nhttps://github.com/search?q=repo%3AOWASP%2Fowasp-mastg%20%22cwe%3A%22&type=code\r\n\r\nFor example, in MASWE-0041:\r\n\r\n```yaml\r\nmappings:\r\n masvs-v1: [MSTG-AUTH-1]\r\n masvs-v2: [MASVS-AUTH-2]\r\n cwe: [603, 307, 287]\r\n```\r\n\r\nReview the suggestions below and add the remaining missing mappings to the rest of the MASWE.\r\n\r\n\r\n### Discussed in https://github.com/OWASP/owasp-mastg/discussions/2857\r\n\r\n
\r\n\r\nOriginally posted by **poffo-mobisec** August 2, 2024\r\nI love the introduction of Weaknesses in mobile security. It was missing and it is brilliant. But let's go straight to the point.\r\n\r\nNowadays most of enterprises have standardized systems and works with CWE.\r\nHave you considered relate each MASWE to a CWE, to ease the risk management and company integration?\r\n\r\nI think this could give a lot of extra value to the project, allowing MASWE to be very specific on mobile weaknesses but at the same time bring compatibility with nowadays market.\r\n\r\nTHE FOLLOWING CONTENT IS AI-GENERATED, so this work would absolutlely need a check, but it gives the idea of the result:\r\n\r\n| MASWE ID | MASWE Title | Relevant CWE ID | CWE Title |\r\n|----------------|----------------------------------------------------------|-----------------|----------------------------------------------------|\r\n| MASWE-0001 | Insertion of Sensitive Data into Logs | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0002 | Sensitive Data Stored With Insufficient Access Restrictions | CWE-200 | Exposure of Sensitive Information to an Unauthorized Actor |\r\n| MASWE-0003 | Sensitive Data Remains in App Backups | CWE-212 | Improper Cross-boundary Removal of Sensitive Data |\r\n| MASWE-0004 | Unencrypted Sensitive Data Stored in Non-Volatile Memory | CWE-311 | Missing Encryption of Sensitive Data |\r\n| MASWE-0005 | Insecure Data Storage in Shared Preferences | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0006 | Insecure Data Storage in SQL Databases | CWE-312 | Cleartext Storage of Sensitive Information |\r\n| MASWE-0007 | Insecure Data Storage in External Storage | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0008 | Insecure Data Storage in Cloud Services | CWE-256 | Unprotected Storage of Credentials |\r\n| MASWE-0009 | Insecure Data Storage in Cache | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0010 | Insecure Data Storage in Clipboard | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0011 | Sensitive Data in Application Memory | CWE-226 | Sensitive Information in Data Storage Element |\r\n| MASWE-0012 | Sensitive Data in System Logs | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0013 | Sensitive Data in Browser Cache | CWE-200 | Exposure of Sensitive Information to an Unauthorized Actor |\r\n| MASWE-0014 | Sensitive Data in WebView | CWE-200 | Exposure of Sensitive Information to an Unauthorized Actor |\r\n| MASWE-0015 | Sensitive Data in URL | CWE-598 | Use of GET Request Method with Sensitive Query Strings |\r\n| MASWE-0016 | Lack of Data Protection During Transmission | CWE-319 | Cleartext Transmission of Sensitive Information |\r\n| MASWE-0017 | Insecure Use of Cryptography | CWE-327 | Use of a Broken or Risky Cryptographic Algorithm |\r\n| MASWE-0018 | Insecure Random Number Generation | CWE-338 | Use of Cryptographically Weak Pseudo-Random Number Generator (PRNG) |\r\n| MASWE-0019 | Missing Integrity Checks on Sensitive Data | CWE-354 | Improper Validation of Integrity Check Value |\r\n| MASWE-0020 | Missing Confidentiality Protections | CWE-311 | Missing Encryption of Sensitive Data |\r\n| MASWE-0021 | Sensitive Data in Logs | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0022 | Missing Security Controls for Sensitive Data | CWE-284 | Improper Access Control |\r\n| MASWE-0023 | Insecure Use of Hashing | CWE-327 | Use of a Broken or 
Risky Cryptographic Algorithm |\r\n| MASWE-0024 | Unsecured External Communication | CWE-319 | Cleartext Transmission of Sensitive Information |\r\n| MASWE-0025 | Insecure Data Storage in Memory | CWE-200 | Exposure of Sensitive Information to an Unauthorized Actor |\r\n| MASWE-0026 | Sensitive Data in Third-Party Services | CWE-295 | Improper Certificate Validation |\r\n| MASWE-0027 | Insecure Data Transmission Using SMS | CWE-319 | Cleartext Transmission of Sensitive Information |\r\n| MASWE-0028 | Sensitive Data in Keyboard Cache | CWE-200 | Exposure of Sensitive Information to an Unauthorized Actor |\r\n| MASWE-0029 | Insecure Data Storage in Keychain | CWE-312 | Cleartext Storage of Sensitive Information |\r\n| MASWE-0030 | Insecure Data Storage in Shared Directory | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0031 | Insecure Data Storage in Logs | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0032 | Insecure Data Storage in Debugging Information | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0033 | Insecure Data Storage in Crash Reports | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0034 | Insecure Data Storage in System Logs | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0035 | Insecure Data Storage in Third-Party Components | CWE-295 | Improper Certificate Validation |\r\n| MASWE-0036 | Insecure Data Storage in Cloud Storage | CWE-256 | Unprotected Storage of Credentials |\r\n| MASWE-0037 | Insecure Data Storage in App Sandboxes | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0038 | Insecure Data Storage in Cookie Storage | CWE-315 | Cleartext Storage of Sensitive Information in a Cookie |\r\n| MASWE-0039 | Insecure Data Storage in Web Storage | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0040 | Insecure Data Storage in IndexedDB | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0041 | Insecure Data Storage in LocalStorage | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0042 | Insecure Data Storage in SessionStorage | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0043 | Insecure Data Storage in FileSystem API | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0044 | Insecure Data Storage in App Bundle | CWE-312 | Cleartext Storage of Sensitive Information |\r\n| MASWE-0045 | Insecure Data Storage in Application Data | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0046 | Insecure Data Storage in Application Code | CWE-312 | Cleartext Storage of Sensitive Information |\r\n| MASWE-0047 | Insecure Data Storage in System Services | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0048 | Insecure Data Storage in App Configuration | CWE-312 | Cleartext Storage of Sensitive Information |\r\n| MASWE-0049 | Insecure Data Storage in Environment Variables | CWE-312 | Cleartext Storage of Sensitive Information |\r\n| MASWE-0050 | Insecure Data Storage in Shared Objects | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0051 | Insecure Data Storage in Shared Libraries | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0052 | Insecure Data Storage in Shared Components | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0053 | Insecure Data Storage in Shared Resources | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0054 | Insecure Data Storage in Shared 
Applications | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0055 | Insecure Data Storage in Shared Files | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0056 | Insecure Data Storage in Shared Devices | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0057 | Insecure Data Storage in Shared Network Storage | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0058 | Insecure Data Storage in Shared Infrastructure | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0059 | Insecure Data Storage in Shared Services | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0060 | Insecure Data Storage in Shared Platforms | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0061 | Insecure Data Storage in Shared Cloud Services | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0062 | Insecure Data Storage in Shared Virtualization Platforms | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0063 | Insecure Data Storage in Shared Containers | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0064 | Insecure Data Storage in Shared Hosts | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0065 | Insecure Data Storage in Shared Hypervisors | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0066 | Insecure Data Storage in Shared Orchestration | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0067 | Insecure Data Storage in Shared Configuration Management | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0068 | Insecure Data Storage in Shared DevOps Pipelines | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0069 | Insecure Data Storage in Shared CI/CD Tools | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0070 | Insecure Data Storage in Shared Testing Environments | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0071 | Insecure Data Storage in Shared Monitoring Tools | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0072 | Insecure Data Storage in Shared Logging Services | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0073 | Insecure Data Storage in Shared Security Tools | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0074 | Insecure Data Storage in Shared Automation Tools | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0075 | Insecure Data Storage in Shared Resource Management Tools| CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0076 | Dependencies with Known Vulnerabilities | CWE-1104 | Use of Unmaintained Third-party Components |\r\n| MASWE-0077 | Running on a recent Platform Version Not Ensured | CWE-1105 | Insufficient Software Version Update |\r\n| MASWE-0078 | Latest Platform Version Not Targeted | CWE-1105 | Insufficient Software Version Update |\r\n| MASWE-0079 | App Runs on Jailbroken or Rooted Devices | CWE-862 | Incorrect Authorization |\r\n| MASWE-0080 | App Runs on Emulator | CWE-325 | Missing Cryptographic Step |\r\n| MASWE-0081 | Debugging Enabled | CWE-489 | Active Debug Code |\r\n| MASWE-0082 | Developer Options Enabled | CWE-489 | Active Debug Code |\r\n| MASWE-0083 | Unsafe Handling of Data From The User Interface | CWE-20 | Improper Input Validation |\r\n| MASWE-0084 | Unsafe Handling of Data from IPC | CWE-20 | Improper Input Validation |\r\n| MASWE-0085 | Insecure Inter-Process Communication | CWE-319 | Cleartext 
Transmission of Sensitive Information |\r\n| MASWE-0086 | SQL Injection | CWE-89 | Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection') |\r\n| MASWE-0087 | Insecure Parsing and Escaping | CWE-116 | Improper Encoding or Escaping of Output |\r\n| MASWE-0088 | Insecure Object Deserialization | CWE-502 | Deserialization of Untrusted Data |\r\n| MASWE-0089 | Improper Certificate Validation | CWE-295 | Improper Certificate Validation |\r\n| MASWE-0090 | Improper Use of Platform APIs | CWE-749 | Exposed Dangerous Method or Function |\r\n| MASWE-0091 | Sensitive Data in Logs | CWE-532 | Insertion of Sensitive Information into Log File |\r\n| MASWE-0092 | Insecure Data Storage in External Devices | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0093 | Sensitive Data in Backup Files | CWE-212 | Improper Cross-boundary Removal of Sensitive Data |\r\n| MASWE-0094 | Insecure Data Transmission Using Insecure Protocols | CWE-319 | Cleartext Transmission of Sensitive Information |\r\n| MASWE-0095 | Insecure Use of Third-party Libraries | CWE-1104 | Use of Unmaintained Third-party Components |\r\n| MASWE-0096 | Unencrypted Sensitive Data Stored in Volatile Memory | CWE-311 | Missing Encryption of Sensitive Data |\r\n| MASWE-0097 | Insecure Data Transmission Using Push Notifications | CWE-319 | Cleartext Transmission of Sensitive Information |\r\n| MASWE-0098 | Insecure Data Transmission Using Email | CWE-319 | Cleartext Transmission of Sensitive Information |\r\n| MASWE-0099 | Insecure Data Transmission Using Third-party Services | CWE-319 | Cleartext Transmission of Sensitive Information |\r\n| MASWE-0100 | Insecure Data Storage in Temporary Files | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0101 | Insecure Data Storage in Local Databases | CWE-312 | Cleartext Storage of Sensitive Information |\r\n| MASWE-0102 | Sensitive Data in Memory Dumps | CWE-226 | Sensitive Information in Data Storage Element |\r\n| MASWE-0103 | Insecure Data Storage in Shared Drives | CWE-922 | Insecure Storage of Sensitive Information |\r\n| MASWE-0104 | App Integrity Not Verified | CWE-353 | Missing Support for Integrity Check |\r\n| MASWE-0105 | Integrity of App Resources Not Verified | CWE-353 | Missing Support for Integrity Check |\r\n| MASWE-0106 | Official Store Verification Not Implemented | CWE-353 | Missing Support for Integrity Check |\r\n| MASWE-0107 | Runtime Code Integrity Not Verified | CWE-353 | Missing Support for Integrity Check |\r\n| MASWE-0108 | Sensitive Data in Network Traffic | CWE-319 | Cleartext Transmission of Sensitive Information |
", + "summary": "The issue discusses the need to enhance the Mobile Application Security Verification Standard (MASWE) by adding additional Common Weakness Enumeration (CWE) mappings. While MASWE already includes some CWE mappings, there are several missing that could improve compatibility with enterprise risk management frameworks. \n\nA detailed list of proposed mappings for various MASWE entries is provided, suggesting that each MASWE ID should be associated with relevant CWE identifiers to facilitate integration and understanding of mobile security weaknesses.\n\nTo address this, a review of the suggested mappings should be conducted, and the relevant additional CWE mappings should be incorporated into the MASWE framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2858", @@ -32655,8 +33783,8 @@ 514 ], "labels": [ - 222, - 224 + 224, + 222 ] } }, @@ -32669,6 +33797,7 @@ "node_id": "I_kwDOBCbAAs6TZH6A", "title": "[TOOL] Add ldid", "body": "https://theapplewiki.com/wiki/Dev:Ldid\r\nhttps://github.com/basti564/fakesigner/blob/master/fakesigner.sh\r\n\r\nhttps://mas.owasp.org/MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/\r\nhttps://mas.owasp.org/MASTG/techniques/ios/MASTG-TECH-0084/#debugging-with-lldb", + "summary": "The issue discusses the need to add the \"ldid\" tool to the project. It references relevant resources, including documentation from The Apple Wiki and a script from the fakesigner repository. Additionally, it points to specific tests and techniques outlined in the OWASP Mobile Application Security Testing Guide, highlighting the importance of ldid in debugging and resilience testing for iOS applications. To address this, integrating the ldid tool into the existing framework or workflow may be necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2880", @@ -32698,6 +33827,7 @@ "node_id": "I_kwDOBCbAAs6TqAyh", "title": "Update Suggested Reading books list", "body": "https://mas.owasp.org/MASTG/0x09-Suggested-Reading/\r\n\r\nMost books are quite outdated.", + "summary": "The issue highlights the need to update the Suggested Reading list for books, as many of the current selections are outdated. To address this, a review and refresh of the book recommendations should be conducted to include more current and relevant titles.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2882", @@ -32722,10 +33852,11 @@ "pk": 1147, "fields": { "nest_created_at": "2024-09-11T21:26:43.935Z", - "nest_updated_at": "2024-09-12T01:08:00.850Z", + "nest_updated_at": "2024-09-13T16:58:20.743Z", "node_id": "I_kwDOBCbAAs6WBW6e", "title": "[TOOL] Add pidcat", "body": "The tools assists in filtering the logs of a specific android application in a color-coded format.\r\n\r\nlink: https://github.com/JakeWharton/pidcat", + "summary": "The issue suggests adding a tool called pidcat, which helps filter logs for specific Android applications and presents them in a color-coded format. 
To address this, integrating pidcat into the project would enhance log management for users by making it easier to read and analyze application logs.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-mastg/issues/2889", @@ -32757,6 +33888,7 @@ "node_id": "MDU6SXNzdWUyNjg5MjI2OTQ=", "title": "Key Storage Server Side: Enable HTTPS", "body": "Documenting a fix for this bug:\r\n\r\n1) Enable HTTPS on the API call.\r\n2) Store the key in the keychain instead of a local variable.\r\n\r\nCan you enable both HTTP and HTTPS on this endpoint? This should be pretty easy to achieve using AWS certificate manager or . Or swap the endpoint over to an API Gateway lambda call, which supports HTTPS out of the box.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/igoat/issues/47", @@ -32783,6 +33915,7 @@ "node_id": "MDU6SXNzdWU0MDg1MDg3ODY=", "title": "Error on installing iGoat.ipa", "body": "I tried to install the ipa using installipa but failed :(\r\nHow to install the app?\r\n\r\n![img_0070](https://user-images.githubusercontent.com/30687786/52530908-d5858e00-2d3f-11e9-8d60-6f606dfd95e6.PNG)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/igoat/issues/56", @@ -32809,6 +33942,7 @@ "node_id": "MDU6SXNzdWU3MTA2Nzk3NTg=", "title": "A", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/igoat/issues/58", @@ -32831,10 +33965,11 @@ "pk": 1151, "fields": { "nest_created_at": "2024-09-11T21:27:10.433Z", - "nest_updated_at": "2024-09-11T21:27:10.433Z", + "nest_updated_at": "2024-09-13T16:14:47.990Z", "node_id": "I_kwDOBBEVjM5aTH6_", "title": "Does this project still have some support?", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/igoat/issues/60", @@ -32861,6 +33996,7 @@ "node_id": "MDU6SXNzdWU5NTExMzMwMjI=", "title": "Clean MASVS repo", "body": "The current repo contains a lot of blobs that are not needed and we should remove them, to reduce unnecessary bandwidth /storage usage when checking out the repo. 
\r\n \r\nThis seems to be the way to go to remove the majority of the large files https://rtyley.github.io/bfg-repo-cleaner/\r\n\r\nhttps://stackoverflow.com/a/17890278\r\n\r\n```bash\r\n$ git rev-list --objects --all \\\r\n| git cat-file --batch-check='%(objecttype) %(objectname) %(objectsize) %(rest)' \\\r\n| sed -n 's/^blob //p' \\\r\n| sort --numeric-sort --key=2 \\\r\n| cut -c 1-12,41- \\\r\n| $(command -v gnumfmt || echo numfmt) --field=2 --to=iec-i --suffix=B --padding=7 --round=nearest | grep \"MiB\"\r\nbed78e1f0030 1.0MiB cover.jpg\r\n13b742ea15b1 1.0MiB tools/cover.xcf\r\n5009b0037ff9 1.0MiB tools/cover.xcf\r\nc2da15abf619 1.0MiB cover.jpg\r\na0b71aff147a 1.5MiB asvs.md\r\n42e27494cd0f 2.1MiB Document-ko/masvs-korean.pdf\r\naaa0defcf959 3.2MiB cover.jpg\r\n431d9a53bcfa 3.2MiB Document/cover_full_page.jpg\r\n501891a42b7f 3.4MiB cover.jpg\r\n02ada71cfa1a 8.5MiB generated/MASVS-zhtw.docx\r\n084324f0a05e 8.5MiB generated/MASVS.docx\r\n866e9b417fc9 8.6MiB generated/MASVS-fr.docx\r\ne02abfee96ea 8.6MiB generated/MASVS-ru.docx\r\nc49ceb1580fc 8.6MiB generated/MASVS-ja.docx\r\n0c6cfb8d6ecd 8.6MiB generated/MASVS-es.docx\r\nf252841672eb 8.8MiB tools/reference.docx\r\n5d8e84016b62 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\n2a102144f42e 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\n0c8a5213c9a4 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\n0a69782875f6 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\n2132f50b4a03 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\n0df043410507 10MiB generated/MASVS_Document-ja.pdf\r\n2c73e7ed6bce 10MiB generated/MASVS_Document-ja.pdf\r\n933566e64729 10MiB generated/MASVS_Document-ja.pdf\r\ndf3f82fa82db 10MiB generated/MASVS_Document-ja.pdf\r\n3a93a300d5af 10MiB generated/MASVS_Document-ja.pdf\r\n588a0e8fa910 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\n67cd9040e291 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\n9ce318feb7b6 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\nd15397a6d802 10MiB generated/MASVS_Document-ja.pdf\r\n1e757d203f6f 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\n6c6d8babb7c1 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\n853b94d6ee73 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\n9c69a4082908 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\nc9e15b4a8ac9 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\ncfd0ca49a655 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\neebca793154a 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\nff3f0be72479 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.1_Document-ja.pdf\r\n7c211e7bc781 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-zhtw.pdf\r\n950b3639c196 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-zhtw.pdf\r\n398df781f701 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-zhtw.pdf\r\n746684363818 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-zhtw.pdf\r\n77983bebd1e5 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-zhtw.pdf\r\n18422a443c53 
10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-zhtw.pdf\r\n7ea720842884 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-zhtw.pdf\r\nd9a95b29795f 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\n956cc5620ad0 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\n0af63352cc5e 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\n271cf62c7f05 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\nc23e8188d270 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\n66cfde36ea1e 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.3_Document-zhtw.pdf\r\n5345ce6f7b8f 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.3_Document-ja.pdf\r\nf3f5904a3453 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.2_Document-ja.pdf\r\ne9c45397da16 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.3_Document-zhtw.pdf\r\n14c6b5de9479 10MiB generated/OWASP_Mobile_AppSec_Verification_Standard_1.1.3_Document-ja.pdf\r\n```\r\n\r\nThe repo size is at the moment over 120MB:\r\n\r\n```bash\r\n$ du -hc\r\n228K\t./tools/docker\r\n2.0M\t./tools\r\n560K\t./Document-fa/images\r\n728K\t./Document-fa\r\n1.5M\t./Document-fr/images\r\n1.6M\t./Document-fr\r\n816K\t./Document-es/images\r\n944K\t./Document-es\r\n1.4M\t./Document-de/images\r\n1.5M\t./Document-de\r\n560K\t./Document-hi/images\r\n776K\t./Document-hi\r\n696K\t./Document-ptpt/images\r\n820K\t./Document-ptpt\r\n964K\t./Document-ko/images\r\n1.1M\t./Document-ko\r\n588K\t./Document-ptbr/images\r\n720K\t./Document-ptbr\r\n800K\t./Document/images\r\n936K\t./Document\r\n976K\t./Document-ru/images\r\n1.1M\t./Document-ru\r\n4.0K\t./.github/workflows/config\r\n 24K\t./.github/workflows\r\n 32K\t./.github\r\n1.5M\t./Document-ja/images\r\n1.6M\t./Document-ja\r\n1.4M\t./Document-zhtw/images\r\n1.5M\t./Document-zhtw\r\n8.0K\t./.git/.gitstatus.HOddY1\r\n 20K\t./.git/objects/61\r\n 12K\t./.git/objects/0d\r\n 16K\t./.git/objects/95\r\n 16K\t./.git/objects/59\r\n 24K\t./.git/objects/92\r\n 16K\t./.git/objects/0c\r\n 24K\t./.git/objects/66\r\n 20K\t./.git/objects/3e\r\n3.7M\t./.git/objects/50\r\n 20K\t./.git/objects/68\r\n392K\t./.git/objects/57\r\n 32K\t./.git/objects/3b\r\n 20K\t./.git/objects/6f\r\n 12K\t./.git/objects/03\r\n136K\t./.git/objects/9b\r\n 36K\t./.git/objects/9e\r\n 20K\t./.git/objects/04\r\n 12K\t./.git/objects/6a\r\n 36K\t./.git/objects/32\r\n 32K\t./.git/objects/35\r\n 16K\t./.git/objects/69\r\n 40K\t./.git/objects/3c\r\n 16K\t./.git/objects/51\r\n 28K\t./.git/objects/3d\r\n4.0K\t./.git/objects/58\r\n 16K\t./.git/objects/67\r\n 56K\t./.git/objects/0b\r\n 24K\t./.git/objects/93\r\n 40K\t./.git/objects/94\r\n 40K\t./.git/objects/0e\r\n 20K\t./.git/objects/60\r\n8.0K\t./.git/objects/34\r\n 32K\t./.git/objects/5a\r\n 16K\t./.git/objects/5f\r\n 16K\t./.git/objects/33\r\n4.0K\t./.git/objects/05\r\n 32K\t./.git/objects/9d\r\n 36K\t./.git/objects/9c\r\n 20K\t./.git/objects/02\r\n8.0K\t./.git/objects/a4\r\n 24K\t./.git/objects/a3\r\n 28K\t./.git/objects/b5\r\n 12K\t./.git/objects/b2\r\n 32K\t./.git/objects/d9\r\n 16K\t./.git/objects/ac\r\n 24K\t./.git/objects/ad\r\n 32K\t./.git/objects/bb\r\n 20K\t./.git/objects/d7\r\n 12K\t./.git/objects/d0\r\n960K\t./.git/objects/be\r\n 20K\t./.git/objects/b3\r\n 16K\t./.git/objects/df\r\n 24K\t./.git/objects/da\r\n 20K\t./.git/objects/b4\r\n 36K\t./.git/objects/a2\r\n 20K\t./.git/objects/a5\r\n 
32K\t./.git/objects/bd\r\n 48K\t./.git/objects/d1\r\n 28K\t./.git/objects/d6\r\n 12K\t./.git/objects/bc\r\n 20K\t./.git/objects/ae\r\n 48K\t./.git/objects/d8\r\n 24K\t./.git/objects/ab\r\n 24K\t./.git/objects/e5\r\n 32K\t./.git/objects/e2\r\n 20K\t./.git/objects/f4\r\n 12K\t./.git/objects/f3\r\n4.0K\t./.git/objects/eb\r\n 32K\t./.git/objects/c7\r\n 48K\t./.git/objects/c0\r\n 28K\t./.git/objects/ee\r\n 12K\t./.git/objects/c9\r\n 44K\t./.git/objects/fc\r\n 40K\t./.git/objects/fd\r\n 36K\t./.git/objects/f2\r\n 12K\t./.git/objects/f5\r\n 24K\t./.git/objects/e3\r\n 20K\t./.git/objects/cf\r\n 16K\t./.git/objects/ca\r\n4.0K\t./.git/objects/e4\r\n 40K\t./.git/objects/fe\r\n 16K\t./.git/objects/c8\r\n 12K\t./.git/objects/fb\r\n 32K\t./.git/objects/ed\r\n 28K\t./.git/objects/c1\r\n 16K\t./.git/objects/c6\r\n 12K\t./.git/objects/ec\r\n 52K\t./.git/objects/4e\r\n 48K\t./.git/objects/20\r\n 28K\t./.git/objects/18\r\n4.0K\t./.git/objects/27\r\n 32K\t./.git/objects/4b\r\n 96M\t./.git/objects/pack\r\n 20K\t./.git/objects/11\r\n 20K\t./.git/objects/7d\r\n 20K\t./.git/objects/29\r\n 12K\t./.git/objects/7c\r\n 52K\t./.git/objects/16\r\n 32K\t./.git/objects/42\r\n 20K\t./.git/objects/89\r\n 16K\t./.git/objects/45\r\n 32K\t./.git/objects/1f\r\n 20K\t./.git/objects/73\r\n 16K\t./.git/objects/87\r\n 24K\t./.git/objects/80\r\n 12K\t./.git/objects/74\r\n 12K\t./.git/objects/1a\r\n 24K\t./.git/objects/28\r\n 28K\t./.git/objects/17\r\n 28K\t./.git/objects/7b\r\n 36K\t./.git/objects/8f\r\n 24K\t./.git/objects/8a\r\n 16K\t./.git/objects/7e\r\n 16K\t./.git/objects/10\r\n 20K\t./.git/objects/19\r\n 20K\t./.git/objects/4c\r\n 36K\t./.git/objects/26\r\n 20K\t./.git/objects/21\r\n 28K\t./.git/objects/4d\r\n 28K\t./.git/objects/75\r\n 36K\t./.git/objects/81\r\n 12K\t./.git/objects/86\r\n 16K\t./.git/objects/72\r\n 40K\t./.git/objects/44\r\n 12K\t./.git/objects/2a\r\n 24K\t./.git/objects/2f\r\n8.0K\t./.git/objects/43\r\n 16K\t./.git/objects/88\r\n8.0K\t./.git/objects/9f\r\n 36K\t./.git/objects/6b\r\n 20K\t./.git/objects/07\r\n 28K\t./.git/objects/38\r\n 16K\t./.git/objects/00\r\n 16K\t./.git/objects/6e\r\n4.0K\t./.git/objects/9a\r\n 20K\t./.git/objects/36\r\n 28K\t./.git/objects/5c\r\n 28K\t./.git/objects/09\r\n 16K\t./.git/objects/5d\r\n 16K\t./.git/objects/31\r\n 0B\t./.git/objects/info\r\n 24K\t./.git/objects/91\r\n 24K\t./.git/objects/65\r\n 32K\t./.git/objects/62\r\n 32K\t./.git/objects/96\r\n 16K\t./.git/objects/3a\r\n 12K\t./.git/objects/54\r\n 20K\t./.git/objects/98\r\n 20K\t./.git/objects/53\r\n 44K\t./.git/objects/3f\r\n 20K\t./.git/objects/30\r\n968K\t./.git/objects/5e\r\n 20K\t./.git/objects/5b\r\n4.0K\t./.git/objects/37\r\n 32K\t./.git/objects/08\r\n 24K\t./.git/objects/6d\r\n 28K\t./.git/objects/01\r\n 40K\t./.git/objects/06\r\n8.0K\t./.git/objects/6c\r\n 24K\t./.git/objects/39\r\n 32K\t./.git/objects/99\r\n8.0K\t./.git/objects/52\r\n 32K\t./.git/objects/55\r\n 12K\t./.git/objects/97\r\n 12K\t./.git/objects/63\r\n 20K\t./.git/objects/0f\r\n 24K\t./.git/objects/0a\r\n 12K\t./.git/objects/64\r\n 44K\t./.git/objects/90\r\n 36K\t./.git/objects/bf\r\n 12K\t./.git/objects/d3\r\n 28K\t./.git/objects/d4\r\n 20K\t./.git/objects/ba\r\n 20K\t./.git/objects/a0\r\n 20K\t./.git/objects/a7\r\n 20K\t./.git/objects/b8\r\n 36K\t./.git/objects/b1\r\n 16K\t./.git/objects/dd\r\n 12K\t./.git/objects/dc\r\n 16K\t./.git/objects/b6\r\n 16K\t./.git/objects/a9\r\n 20K\t./.git/objects/d5\r\n 12K\t./.git/objects/d2\r\n 16K\t./.git/objects/aa\r\n120K\t./.git/objects/af\r\n 20K\t./.git/objects/b7\r\n8.0K\t./.git/objects/db\r\n 
28K\t./.git/objects/a8\r\n 12K\t./.git/objects/de\r\n 12K\t./.git/objects/b0\r\n 20K\t./.git/objects/a6\r\n 12K\t./.git/objects/b9\r\n 12K\t./.git/objects/a1\r\n 20K\t./.git/objects/ef\r\n 12K\t./.git/objects/c3\r\n 40K\t./.git/objects/c4\r\n 24K\t./.git/objects/ea\r\n 20K\t./.git/objects/e1\r\n 24K\t./.git/objects/cd\r\n 20K\t./.git/objects/cc\r\n 28K\t./.git/objects/e6\r\n320K\t./.git/objects/f9\r\n 20K\t./.git/objects/f0\r\n 24K\t./.git/objects/f7\r\n 40K\t./.git/objects/e8\r\n 16K\t./.git/objects/fa\r\n 32K\t./.git/objects/ff\r\n8.0K\t./.git/objects/c5\r\n992K\t./.git/objects/c2\r\n 16K\t./.git/objects/f6\r\n 32K\t./.git/objects/e9\r\n 32K\t./.git/objects/f1\r\n8.0K\t./.git/objects/e7\r\n8.0K\t./.git/objects/cb\r\n 12K\t./.git/objects/f8\r\n 32K\t./.git/objects/ce\r\n 32K\t./.git/objects/e0\r\n 24K\t./.git/objects/46\r\n 28K\t./.git/objects/2c\r\n 20K\t./.git/objects/79\r\n8.0K\t./.git/objects/2d\r\n 24K\t./.git/objects/41\r\n 12K\t./.git/objects/83\r\n8.0K\t./.git/objects/1b\r\n 16K\t./.git/objects/77\r\n 40K\t./.git/objects/48\r\n 24K\t./.git/objects/70\r\n 24K\t./.git/objects/1e\r\n 40K\t./.git/objects/84\r\n 24K\t./.git/objects/4a\r\n 16K\t./.git/objects/24\r\n 28K\t./.git/objects/23\r\n 20K\t./.git/objects/4f\r\n 24K\t./.git/objects/8d\r\n8.0K\t./.git/objects/15\r\n 52K\t./.git/objects/12\r\n 24K\t./.git/objects/8c\r\n 24K\t./.git/objects/85\r\n 28K\t./.git/objects/1d\r\n 20K\t./.git/objects/71\r\n 44K\t./.git/objects/76\r\n 16K\t./.git/objects/1c\r\n 28K\t./.git/objects/82\r\n 24K\t./.git/objects/49\r\n 36K\t./.git/objects/40\r\n 24K\t./.git/objects/2e\r\n 16K\t./.git/objects/2b\r\n 36K\t./.git/objects/47\r\n 36K\t./.git/objects/78\r\n 16K\t./.git/objects/8b\r\n 12K\t./.git/objects/13\r\n 12K\t./.git/objects/7f\r\n 44K\t./.git/objects/7a\r\n 20K\t./.git/objects/14\r\n 16K\t./.git/objects/8e\r\n 28K\t./.git/objects/22\r\n 12K\t./.git/objects/25\r\n109M\t./.git/objects\r\n4.0K\t./.git/info\r\n 88K\t./.git/logs/refs/heads\r\n140K\t./.git/logs/refs/remotes/origin\r\n140K\t./.git/logs/refs/remotes\r\n232K\t./.git/logs/refs\r\n264K\t./.git/logs\r\n 48K\t./.git/hooks\r\n 80K\t./.git/refs/heads\r\n 20K\t./.git/refs/tags\r\n140K\t./.git/refs/remotes/origin\r\n140K\t./.git/refs/remotes\r\n252K\t./.git/refs\r\n8.0K\t./.git/.gitstatus.inYqNU\r\n110M\t./.git\r\n1.4M\t./Document-zhcn/images\r\n1.5M\t./Document-zhcn\r\n128M\t.\r\n128M\ttotal\r\n```", + "summary": "The repository currently contains numerous unnecessary large files that contribute to its size of over 120MB. To optimize the repository and reduce bandwidth and storage usage, it's suggested to remove these blobs. A recommended approach for this cleanup is to utilize the BFG Repo-Cleaner, which can efficiently eliminate large files. Users are encouraged to follow the guidance provided in the linked resources to perform this cleanup effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/owasp-masvs/issues/555", @@ -32887,6 +34023,7 @@ "node_id": "MDU6SXNzdWUzMDMwMzY2NTc=", "title": "Red color for detected issues in HTML", "body": "Hi,\r\n\r\nTo attract more attention to the issues in HTML reports, I'd suggest to display negative results from security perspective in red color. 
Currently this is reversed - for example if no backup file is found, the message is red.\r\n\r\nThanks.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/10", @@ -32915,6 +34052,7 @@ "node_id": "MDU6SXNzdWUzMDQ1MDY1MTA=", "title": "improve code quality", "body": "The current code is written in a way that makes it hard to contribute:\r\n* All modules share the same namespace, \r\n* there are global variables, that are reused in several modules\r\n* Almost no functions are existent\r\n\r\nTo make the code easier to maintain I suggest that:\r\n* the code should use `use warnings` and `use strict`\r\neverywhere. \r\n* all modules should follow the perl way to create modules with `package` \r\n * most global variables are made local\r\n\r\nAs a first step I would:\r\n* put the '*pl' files in `modules/` in a separate namespace, like `JoomScan/Check`\r\n* transform modules to packages\r\n* add 'use warnings' and use 'strict'\r\n* make the code work\r\n\r\nI would only change the code that needs to be changed to make the code work,\r\nto keep the extensive changes to a minimum.\r\n\r\nafter this is done, I would proceed with the rest of the code.\r\n\r\n\r\n\r\nsome example:\r\nmodules/robots.pl orig:\r\n\r\n```perl\r\ndprint(\"Checking robots.txt existing\");\r\n$response=$ua->get(\"$target/robots.txt\");\r\nmy $headers = $response->headers();\r\nmy $content_type =$headers->content_type();\r\nif ($response->status_line =~ /200/g and $content_type =~ /text\\/plain/g) {\r\n\t$source=$response->decoded_content;\r\n\tmy @lines = split /\\n/, $source;\r\n\t$probot=\"\";\r\n\tforeach my $line( @lines ) { \r\n\t\tif($line =~ /llow:/g){\r\n\t\t\t$between=substr($line, index($line, ': ')+2, 99999);\r\n\t\t\t$probot.=\"$target$between\\n\";\r\n\t\t}\r\n\t}\r\n\ttprint(\"robots.txt is found\\npath : $target/robots.txt \\n\\nInteresting path found from \r\n robots.txt\\n$probot\");\r\n}else{\r\n\t fprint(\"robots.txt is not found\");\r\n}\r\n```\r\n\r\nwould become JoomScan/Check/RobotsTXT.pm:\r\n\r\n```perl\r\n\r\npackage JoomScan::Check::RobotsTXT;\r\nuse warnings;\r\nuse strict;\r\nuse JoomScan::Logging qw(tprint dprint fprint)\r\n\r\nsub check {\r\n my ($ua, $target) = @_;\r\n dprint(\"Checking robots.txt existing\");\r\n my $response = $ua->get(\"$target/robots.txt\");\r\n my $headers = $response->headers();\r\n my $content_type =$headers->content_type();\r\n my $probot=\"\";\r\n if ($response->status_line =~ /200/g and \r\n $content_type =~ /text\\/plain/g) {\r\n\tmy $source = $response->decoded_content;\r\n\tmy @lines = split /\\n/, $source;\r\n\tforeach my $line( @lines ) { \r\n\t if($line =~ /llow:/g){\r\n\t my $between=substr($line, index($line, ': ')+2, 99999);\r\n\t $probot.=\"$target$between\\n\";\r\n\t }\r\n\t}\r\n\ttprint(\"robots.txt is found\\npath : $target/robots.txt \\n\\nInteresting path found from robots.txt\\n$probot\");\r\n }else{\r\n\tfprint(\"robots.txt is not found\");\r\n }\r\n}\r\n\r\n1;\r\n```\r\nI think that code could still be improved, but I would leave that for the next round of refactoring\r\n ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/17", @@ -32943,6 +34081,7 @@ "node_id": "MDU6SXNzdWUzMDQ5NTYwMDc=", "title": "Running from other directory", "body": "Hi,\r\n\r\nIf joomscan is called from directory, where is `reports` sub-dir does not exist, reports are not saved, but output stitll shows the message: `Your Reports : reports/$target`.\r\n\r\nIt should be 
mentioned when scan is started that no reports will be created because of the missing directory, or directory should be created.\r\n\r\nThanks.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/20", @@ -32969,6 +34108,7 @@ "node_id": "MDU6SXNzdWUzMDQ5NjA4MjA=", "title": "Robots.txt path incorrect when Disallow is empty", "body": "Hi,\r\nIf `robots.txt` contains Disallow line with no path, joomscan incorrectly displays the interesting path.\r\nrobots.txt:\r\n```\r\nUser-agent: *\r\nDisallow:\r\n```\r\nOutput:\r\n```\r\n[++] robots.txt is found\r\npath : http://target.example.org/robots.txt \r\n\r\nInteresting path found from robots.txt\r\nhttp://target.example.orgisallow:\r\n```\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/21", @@ -32995,6 +34135,7 @@ "node_id": "MDU6SXNzdWUzMDU0MTI3MDM=", "title": "Joomscan mis-reports Joomla version", "body": "Hi there,\r\n\r\nI've just checked a brand new joomla site with Joomscan (after a lot of hardening) and had an interesting report come back.\r\n\r\nI'm absolutely, 100% on Joomla 3.8.6. I'm on a VPS that was spun up just for this project, which started when Joomla was on 3.8.5.\r\n\r\nJoomscan has reported back the version as 2.5.\r\n\r\nAlso, it's listed a bunch of vulnerabilities that shouldn't be relevant to this version:\r\n\r\nhttps://www.dropbox.com/s/91k515an0gt9uv1/Screenshot%202018-03-15%2015.29.13.png?dl=0\r\n\r\nIt's also found an admin directory that doesn't exist: http://www.mysite.com/admin/\r\n\r\nGiven that it's misreporting these things, that's probably a good thing, insofar as an actual attacker will pursue outdated vectors. However I'm about to go through pen-testing and I'm curious to see if they'll just report back this list of vulnerabilities as if it's actually the case.\r\n\r\nRegards,\r\n\r\nKelsey\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/23", @@ -33021,6 +34162,7 @@ "node_id": "MDU6SXNzdWUzMDYxNjM3MTI=", "title": "add proper make/install procedure", "body": "As soon as issue #17 is integrated\r\nAdd a possibility to install joomscan with perl tools (MAKEFILE.PL ...)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/25", @@ -33049,6 +34191,7 @@ "node_id": "MDU6SXNzdWUzMDYyMzM0Njg=", "title": "transform the exploit database to yaml", "body": "The exploit database is hard to maintain,\r\nmoving it to yaml, makes it human readable and much easier to maintain and contribute to.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/26", @@ -33077,6 +34220,7 @@ "node_id": "MDU6SXNzdWUzMzMyMTI3Njc=", "title": "Database", "body": "dear sir,\r\ncan you explain the way by which database is update and add new entry in it. ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/27", @@ -33103,6 +34247,7 @@ "node_id": "MDU6SXNzdWUzMzMyMTUzNzI=", "title": "Update command", "body": "dear sir,\r\nno update command is given. 
i want to update the program with new vulnerabilities.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/28", @@ -33125,10 +34270,11 @@ "pk": 1162, "fields": { "nest_created_at": "2024-09-11T21:27:51.261Z", - "nest_updated_at": "2024-09-11T21:27:51.261Z", + "nest_updated_at": "2024-09-13T16:14:57.016Z", "node_id": "MDU6SXNzdWUzODkyMTYyOTQ=", "title": "Joomscan does nothing on basic scan", "body": "So I've opened up a privileged cmd window and I have perl installed, then I navigate to the folder where I extracted joomscan. However when I run the basic command `perl joomscan.pl --url www.mywebsite.com` I'm just returned to the prompt, nothing happens. I've checked the \"reports\" folder but nothing there either.\r\n\r\nWindows 10 Professional, 64bit", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/32", @@ -33155,6 +34301,7 @@ "node_id": "MDU6SXNzdWU0ODg4MDY2MDk=", "title": "LWP module error", "body": "> error while executing the program \"perl joomscan.pl\"\r\n\r\n![Screenshot_2019_0904_015546](https://user-images.githubusercontent.com/50936309/64206112-2bc24600-ceb7-11e9-8c69-353b52d051e0.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/43", @@ -33181,6 +34328,7 @@ "node_id": "MDU6SXNzdWU0OTg2OTY3MzE=", "title": "Report of components scan is incorrect", "body": "When I was tested joomla sites by the joomscan, I found the incorrect report about components vulnerability.\r\nIt's 2 issues. Please fix it.\r\n\r\n1. Not report vulnerability even if sites has components vulnerability.\r\nI guess root cause is variable scope that is incorrect.\r\nIn the components.pl, variable as comversion is use in version compare.\r\nSo you shouldn't define at line 47 with local variable.\r\n(for example, define at 42(my $comversion) then use at 47($comversion = $1))\r\n\r\n2. Not report CVE reference.\r\nI guess root cause is compare the strings that is incorrect.\r\nIn the components.pl at line 78, you should compare @matches[3] and /^-$/ because of /-/ is used as null at local vulnerability db.\r\n\r\n**Executed command** \r\n`perl joomscan.pl --url --ec`\r\n\r\n**Actual report**\r\n```\r\nName: *****\r\nLocation : /components//\r\nDirectory listing is enabled : /components//\r\nInstalled version : *.*\r\n[!] We found the component \"****\", but since the component version was not available we\r\ncannot ensure that it's vulnerable, please test it yourself. <- incorrect\r\nTitle : **** \r\nReference : https://www.exploit-db.com/exploits/****\r\nFixed in : *.*\r\n```\r\n\r\n**Expected report**\r\n```\r\nName: *****\r\nLocation : /components//\r\nDirectory listing is enabled : /components//\r\nInstalled version : *.*\r\n[!] We found the component \"****\". <- expect\r\nTitle : ****\r\nReference : http://www.cvedetails.com/cve/**** <- expect\r\nReference : https://www.exploit-db.com/exploits/****\r\nFixed in : *.*\r\n```", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/44", @@ -33207,6 +34355,7 @@ "node_id": "MDU6SXNzdWU1MzQ1OTg3Nzc=", "title": "Amblog 1.0 - Multiple SQL Injections NOT detected.", "body": "Joomla! 
Component Amblog 1.0 - Multiple SQL Injections\r\nhttps://www.exploit-db.com/exploits/14596\r\n\r\nThis vulnerability was not detected by JoomScan although it exists and tested successfully with SQLMap and manual exploitation techniques.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/50", @@ -33233,6 +34382,7 @@ "node_id": "MDU6SXNzdWU1ODA5NjA1MjU=", "title": "Many false positives when scanning a Joomla latest 3.9.16 Stable ", "body": "I have tried few times to scan the latest Joomla default install. This seems to be not working. The version shows as 1.5 and just lists the vulnerabilities in there, but all the vuls reported are false positives. Am I missing something in the setup \r\n\r\n```\r\n[+] FireWall Detector\r\n[++] Firewall not detected\r\n\r\n[+] Detecting Joomla Version\r\n[++] Joomla 1.5\r\n\r\n[+] Core Joomla Vulnerability\r\n[++] Joomla! 1.5 Beta 2 - 'Search' Remote Code Execution\r\nEDB : https://www.exploit-db.com/exploits/4212/\r\n\r\nJoomla! 1.5 Beta1/Beta2/RC1 - SQL Injection\r\nCVE : CVE-2007-4781\r\nEDB : https://www.exploit-db.com/exploits/4350/\r\n\r\nJoomla! 1.5.x - (Token) Remote Admin Change Password\r\nCVE : CVE-2008-3681\r\nEDB : https://www.exploit-db.com/exploits/6234/\r\n\r\nJoomla! 1.5.x - Cross-Site Scripting / Information Disclosure\r\nCVE: CVE-2011-4909\r\nEDB : https://www.exploit-db.com/exploits/33061/\r\n\r\nJoomla! 1.5.x - 404 Error Page Cross-Site Scripting\r\nEDB : https://www.exploit-db.com/exploits/33378/\r\n\r\nJoomla! 1.5.12 - read/exec Remote files\r\nEDB : https://www.exploit-db.com/exploits/11263/\r\n\r\nJoomla! 1.5.12 - connect back Exploit\r\nEDB : https://www.exploit-db.com/exploits/11262/\r\n\r\nJoomla! Plugin 'tinybrowser' 1.5.12 - Arbitrary File Upload / Code Execution (Metasploit)\r\nCVE : CVE-2011-4908\r\nEDB : https://www.exploit-db.com/exploits/9926/\r\n\r\nJoomla! 1.5 - URL Redirecting\r\nEDB : https://www.exploit-db.com/exploits/14722/\r\n\r\nJoomla! 1.5.x - SQL Error Information Disclosure\r\nEDB : https://www.exploit-db.com/exploits/34955/ \r\n\r\nJoomla! - Spam Mail Relay\r\nEDB : https://www.exploit-db.com/exploits/15979/\r\n\r\nJoomla! 1.5/1.6 - JFilterInput Cross-Site Scripting Bypass\r\nEDB : https://www.exploit-db.com/exploits/16091/\r\n\r\nJoomla! < 1.7.0 - Multiple Cross-Site Scripting Vulnerabilities\r\nEDB : https://www.exploit-db.com/exploits/36176/\r\n\r\nJoomla! 1.5 < 3.4.5 - Object Injection Remote Command Execution\r\nCVE : CVE-2015-8562\r\nEDB : https://www.exploit-db.com/exploits/38977/\r\n\r\nJoomla! 1.0 < 3.4.5 - Object Injection 'x-forwarded-for' Header Remote Code Execution\r\nCVE : CVE-2015-8562 , CVE-2015-8566 \r\nEDB : https://www.exploit-db.com/exploits/39033/\r\n\r\nJoomla! 1.5.0 Beta - 'pcltar.php' Remote File Inclusion\r\nCVE : CVE-2007-2199\r\nEDB : https://www.exploit-db.com/exploits/3781/\r\n\r\nJoomla! 
Component xstandard editor 1.5.8 - Local Directory Traversal\r\nCVE : CVE-2009-0113\r\nEDB : https://www.exploit-db.com/exploits/7691/\r\n```", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/51", @@ -33259,6 +34409,7 @@ "node_id": "MDU6SXNzdWU5NTM1NDg4Mjg=", "title": "buggy html reports", "body": "\"buggy\"\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/57", @@ -33285,6 +34436,7 @@ "node_id": "MDU6SXNzdWU5NTUyNzUxMjA=", "title": "joomscan.pl should be converted to Unix style line break", "body": "If we try to directly run joomscan on Linux, we get:\r\n\r\n```\r\n$ ./joomscan.pl \r\nzsh: ./joomscan.pl: bad interpreter: /usr/bin/perl^M: no such file or directory\r\n```\r\n\r\nIf we convert it with `dos2unix`:\r\n\r\n```\r\n$ ./joomscan.pl\r\n ____ _____ _____ __ __ ___ ___ __ _ _ \r\n (_ _)( _ )( _ )( \\/ )/ __) / __) /__\\ ( \\( )\r\n .-_)( )(_)( )(_)( ) ( \\__ \\( (__ /(__)\\ ) ( \r\n \\____) (_____)(_____)(_/\\/\\_)(___/ \\___)(__)(__)(_)\\_)\r\n\t\t\t(1337.today)\r\n \r\n --=[OWASP JoomScan\r\n +---++---==[Version : 0.0.7\r\n +---++---==[Update Date : [2018/09/23]\r\n +---++---==[Authors : Mohammad Reza Espargham , Ali Razmjoo\r\n --=[Code name : Self Challenge\r\n @OWASP_JoomScan , @rezesp , @Ali_Razmjo0 , @OWASP\r\n\r\n\r\n Usage: \r\n \tjoomscan.pl \r\n \tjoomscan.pl -u http://target.com/joomla\r\n joomscan.pl -m targets.txt\r\n \r\n \r\n Options: \r\n \tjoomscan.pl --help\r\n```\r\n\r\nMaybe other files should also be converted.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/58", @@ -33311,6 +34463,7 @@ "node_id": "MDU6SXNzdWU5NzkwOTk1NjE=", "title": "DB update ", "body": "Hi ! \r\nthanks for this incredible work ! \r\n\r\nis there any way to update the database vulnerability ? \r\nthe database is now only for joomla website which is build before 2018...\r\n\r\n\r\n ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/59", @@ -33337,6 +34490,7 @@ "node_id": "I_kwDOBAAbP85PtxKA", "title": "\"The target is not alive!\" message regardless of timeout", "body": "Hi,\r\n\r\nI get this message with my Joomla website, no matter what timeout value I take. I cannot mention the website here, but I do seem to have the same problem with another Joomla site of mine,\r\n\r\nBC", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/joomscan/issues/61", @@ -33363,6 +34517,7 @@ "node_id": "MDU6SXNzdWUyNjcyODE0ODE=", "title": "Update Attack vectors diagrams and tables", "body": "Update diagrams to be not PowerPoint 2010\r\n\r\nFix attack vectors / exploitability", + "summary": "The issue focuses on updating the attack vectors diagrams and tables, specifically to ensure that the diagrams are not created using PowerPoint 2010. Additionally, there's a need to fix the details regarding attack vectors and their exploitability. It may be necessary to explore modern diagramming tools and review the current content for accuracy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/168", @@ -33395,6 +34550,7 @@ "node_id": "MDU6SXNzdWUyNzUxOTYxODc=", "title": "A10: Restructure sentence in Scenario #3?", "body": "The last sentence in this scenario seems to indicate that it was an external bank that performed fraudulent transactions, but I guess the intention is to say that it was discovered by an external bank. 
If so, I suggest changing from:\r\n\"The sandbox had been producing warnings for some time before the breach was detected due to fraudulent card transactions _by an external bank_.\"\r\n\r\nTo:\r\n\"The sandbox had been producing warnings for some time before the breach was detected _by an external bank_ due to fraudulent card transactions.\"", + "summary": "The issue discusses a potential ambiguity in the wording of a sentence in Scenario #3, suggesting that the current phrasing implies that an external bank conducted the fraudulent transactions, rather than discovering them. The proposed revision clarifies this intent. The suggested change should be implemented to enhance clarity in the text.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/389", @@ -33421,6 +34577,7 @@ "node_id": "MDU6SXNzdWUyNzUxOTgwNDg=", "title": "Please delete the old OWASP Top 10 repo", "body": "Hi there, \r\n\r\nCould @cscasanovas123 or @mtesauro or one of the OWASP GitHub owners please delete the old OWASP Top 10 repo? It is publishing a github.io page that is confusing the SEO of the real repo (this one). \r\n\r\nhttps://github.com/OWASP/OWASP-Top-10\r\n\r\nthanks,\r\nAndrew\r\n\r\n", + "summary": "A request has been made to delete the old OWASP Top 10 repository due to its interference with the SEO of the current repository by publishing a GitHub Pages site. It is suggested that the repository be removed to resolve the confusion.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/390", @@ -33451,6 +34608,7 @@ "node_id": "MDU6SXNzdWUyNzUyMTY3MjQ=", "title": "Update or delete the OWASP Top Ten Cheat Sheet", "body": "https://www.owasp.org/index.php/OWASP_Top_Ten_Cheat_Sheet must be updated or deleted. If it is being updated, perhaps we should go to a tabbed layout with the 2017 version being the front-most tab.", + "summary": "The OWASP Top Ten Cheat Sheet needs to be either updated or removed. If an update is planned, a suggestion is made to implement a tabbed layout that features the 2017 version as the primary tab. This indicates a need for a review and potential restructuring of the content.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/392", @@ -33481,6 +34639,7 @@ "node_id": "MDU6SXNzdWUyNzUyMTg5NjM=", "title": "Update or delete Mapping to WHID", "body": "I think it makes sense to delete [OWASP T10/Mapping to WHID](https://www.owasp.org/index.php/OWASP_Top_10/Mapping_to_WHID). I've emailed the WHID project leader to confirm.", + "summary": "The issue discusses the proposal to either update or delete the \"OWASP T10/Mapping to WHID\" page. The author has reached out to the WHID project leader for confirmation on this matter. If deletion is deemed appropriate, steps should be taken to remove the page accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/393", @@ -33511,6 +34670,7 @@ "node_id": "MDU6SXNzdWUyNzU3NDg1NDQ=", "title": "Spanish translation for Top 10 2017", "body": "Hello everyone. I'm available to help but it's my first time here and I'm not really too much into github. How should I start? What can I do?", + "summary": "A user is seeking guidance on how to contribute Spanish translations for the \"Top 10 2017\" project. They express a willingness to help but are unfamiliar with using GitHub. 
It would be beneficial for them to receive instructions on how to get started with the translation process and any specific tasks they can undertake.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/398", @@ -33539,6 +34699,7 @@ "node_id": "MDU6SXNzdWUyNzU3ODc5MTE=", "title": "Italian translation for Top10 2017", "body": "Tracking ticket for the Italian translation - Top10 2017. Are there volunteers? Did anybody start working on it already?", + "summary": "This GitHub issue is a tracking ticket for the Italian translation of the Top10 2017 document. It seeks to find volunteers for the translation and inquires whether any work has already begun. Interested contributors should step forward to assist with this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/399", @@ -33567,6 +34728,7 @@ "node_id": "MDU6SXNzdWUyNzU5Mzc1NTA=", "title": "Thai translation for Top-10 2017", "body": "I'm starting to translate OWASP Top-10 2017 in Thai language.\r\nMy repo is at https://github.com/ptantiku/Top10\r\n\r\nFeel free to join by sending pull requests, issues, or leave a comment in Github\r\nor in [OWASP Thailand Chapter's Facebook Group](https://www.facebook.com/groups/owaspthailand/)", + "summary": "A user is working on translating the OWASP Top-10 2017 document into Thai and has created a repository for this purpose. Contributions are welcome through pull requests, issues, or comments on GitHub and the OWASP Thailand Chapter's Facebook Group. To assist with this effort, potential contributors should consider reviewing the existing translation and submitting their own translations or suggestions.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/400", @@ -33595,6 +34757,7 @@ "node_id": "MDU6SXNzdWUyNzYyNjAwOTc=", "title": "Simplified Chinese translation for Top-10 2017", "body": "The Simplified Chinese translation team is starting the work today!", + "summary": "A team is beginning the Simplified Chinese translation for the Top-10 2017 project. It would be beneficial to monitor the progress and provide support as needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/401", @@ -33623,6 +34786,7 @@ "node_id": "MDU6SXNzdWUyNzY0NTY2Nzg=", "title": "Indonesian (Bahasa Indonesia) translation for Top-10 2017", "body": "I have checked that there is no Indonesian translation before for OWASP top 10 and I would like to create one. This is my very first time doing the translation project for OWASP, is there any rules or any format I need to follow? Also, is there any target date for completion?", + "summary": "A user is interested in creating an Indonesian translation for the OWASP Top 10 2017 document, noting that no translation currently exists. They are seeking guidance on any specific rules or formats to follow for the translation project and inquiring about a target completion date. It would be helpful to provide the user with any existing guidelines or resources related to translation projects for OWASP.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/403", @@ -33651,6 +34815,7 @@ "node_id": "MDU6SXNzdWUyNzY4MTA0NTg=", "title": " Korean Translation for Top10 2017 ", "body": "This directory is for the project of Korean Translation for TOP10 2017", + "summary": "The issue is focused on creating a Korean translation for the TOP10 2017 project. 
It indicates that there is a need for translation work to be completed in this directory. To address this, contributors should start working on translating the relevant content into Korean.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/404", @@ -33679,6 +34844,7 @@ "node_id": "MDU6SXNzdWUyNzc1MDM0MjA=", "title": "Swedish translation for Top 10 2017", "body": "Hi,\r\n\r\nCreating a tracking ticket to announce the Swedish translation efforts 😃 I've forked the master branch, feel free to join the translation team by leaving a comment here.\r\n\r\nhttps://github.com/xl-sec/Top10\r\n\r\nMVH Axel", + "summary": "A tracking ticket has been created for the Swedish translation efforts of the Top 10 2017 project. The master branch has been forked, and others are encouraged to join the translation team by commenting on the issue. Collaboration on the translation is welcome.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/405", @@ -33707,6 +34873,7 @@ "node_id": "MDU6SXNzdWUyODc4ODA5NjE=", "title": "Finnish translation for Top 10 2017 ", "body": "Any other volunteers for the Finnish translation? ", + "summary": "A request for volunteers to assist with the Finnish translation of the \"Top 10 2017\" content has been made. Additional contributors are needed to help complete this task.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/416", @@ -33735,6 +34902,7 @@ "node_id": "MDU6SXNzdWUyODk0MTA4MDk=", "title": "Cantonese (zh-hk) translation for OWASP Top 10 2017", "body": "Hi guys,\r\n\r\nI'm trying to kickstart the Cantonese translation of OWASP Top 10 2017. (Hong Kong-based so I will keep it as zh-HK first, but it can be zh-yue / yue as well).\r\n\r\n\r\nCheers,\r\nCharles", + "summary": "A user is initiating the translation of the OWASP Top 10 2017 document into Cantonese (zh-hk). They are based in Hong Kong and are considering using either zh-HK or zh-yue/yue for the translation. Assistance or contributions to this translation effort may be needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/418", @@ -33763,6 +34931,7 @@ "node_id": "MDU6SXNzdWUzMDEzNzQ1NDg=", "title": "Glossary", "body": "Sorry coming back with the glossary thing: [I know this was kind of discussed before][1], but while working on pt-pt translation I've realized that a Glossary is mandatory if we want the document to be accessible to everyone.\r\n\r\nThe following acronyms/abbreviations were taken from `0xa1-injection.md`, only:\r\n\r\n* SQL\r\n* LDAP\r\n* XPath\r\n* NoSQL\r\n* OS\r\n* XML\r\n* SMTP\r\n* ORM\r\n* EL\r\n* OGNL\r\n* URL\r\n* JSON\r\n* SOAP\r\n* CI\r\n* CD\r\n* SAST\r\n* DAST\r\n* API\r\n* PL/SQL\r\n* T-SQL\r\n\r\nOther terms like `scanner`, `fuzzer` may have its own entry in the Glossary: not everyone is familiar with them. Students are a good use case to the Glossary as they might be new to Application Security.\r\n\r\nI think this is an easy decision as we can recover the original Glossary from git history and work on it from there.\r\n\r\nWhat do you think?\r\n\r\n[1]: https://github.com/OWASP/Top10/issues?utf8=%E2%9C%93&q=label%3AGlossary+", + "summary": "The issue discusses the necessity of creating a glossary to enhance accessibility for all readers, particularly for those unfamiliar with specific terms and acronyms related to Application Security. A list of acronyms extracted from a specific document is provided, along with suggestions for additional entries. 
The author proposes recovering a previous version of the glossary from the git history to facilitate this process. It appears that a decision needs to be made regarding the implementation of the glossary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/430", @@ -33789,6 +34958,7 @@ "node_id": "MDU6SXNzdWUzMDIyNTIwNjU=", "title": "Turkish translation for TOP 10 2017", "body": "Hello,\r\n\r\nWe are starting Turkish translations. Please, feel free to join.\r\n\r\nCheers.", + "summary": "A new initiative for Turkish translations of the TOP 10 2017 content has been started, inviting others to participate in the translation efforts. Interested contributors are encouraged to join the project.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/432", @@ -33817,6 +34987,7 @@ "node_id": "MDU6SXNzdWUzMTk4ODQxNDA=", "title": "Persian (فارسی) translation for TOP 10 2017", "body": "Hello,\r\n\r\nWe are starting Persian (فارسی) translations.\r\nFeel free to join by sending pull requests, issues, or leave a comment in Github.\r\nMy repo is at https://github.com/Pouya47/Top10/tree/master/2017\r\nThanks.\r\n\r\nاگه توانایی یا وقت ترجمه ندارید می‌تونید تو بازخوانی متن ترجمه شده کمک کنید.\r\nممنون", + "summary": "A request has been made to initiate Persian (فارسی) translations for the TOP 10 2017 project. Contributions are welcomed through pull requests, issues, or comments on GitHub. There is also an invitation for those who may not have the time or ability to translate to assist with proofreading the translated texts. To participate, refer to the provided repository link.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/436", @@ -33845,6 +35016,7 @@ "node_id": "MDU6SXNzdWUzMzM2Mzg5OTg=", "title": "Machine Readable Format (could be in json, yaml etc)", "body": "I am working on **security automation** from checking web application for the OWASP top 10 vulnerabilities. I'd like to check for the respective `description`,` CWE-IDs` etc and make decisions automatically. I can imagine a machine-readable representation of the top 10 OWASP vulnerabilities that supports security automation. The information does not need to be as comprehensive as the actual document.. Is there such a document already existing ?", + "summary": "The issue discusses the need for a machine-readable format for the OWASP top 10 vulnerabilities, such as JSON or YAML, to facilitate security automation. The user is looking for a structured representation that includes key details like descriptions and CWE-IDs to aid in automated decision-making. A suggestion for addressing this issue would be to explore existing resources or documents that might provide this structured information. If such a document does not exist, creating one could be a valuable contribution.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/440", @@ -33871,6 +35043,7 @@ "node_id": "MDU6SXNzdWUzNDEzOTI2Mjg=", "title": "writing weakness! 
in page 22 of Release 2017", "body": "Hello,\r\nIn \"Business Impact\" column of summery table in page 22 of release 2017:\r\n\"**App Specific**\" should be \"**Business Specific**\" (refer to page 5 table).\r\nPlease correct and notify this correction!\r\nThanks.\r\n\r\n![top 10 summery](https://user-images.githubusercontent.com/41279309/42743810-cea2279a-88dc-11e8-9669-a0bf570ca0c5.JPG)\r\n\r\n![risk assessment](https://user-images.githubusercontent.com/41279309/42743827-03892666-88dd-11e8-826a-800210b19920.JPG)\r\n", + "summary": "The issue highlights a mistake in the \"Business Impact\" column of the summary table on page 22 of the 2017 release, where \"App Specific\" should be changed to \"Business Specific,\" as referenced in the table on page 5. A correction is needed, along with a notification regarding the update.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/442", @@ -33886,9 +35059,9 @@ "repository": 1132, "assignees": [], "labels": [ + 232, 230, - 231, - 232 + 231 ] } }, @@ -33901,6 +35074,7 @@ "node_id": "MDU6SXNzdWUzNDQxMDQ5NzU=", "title": "Brazilian Portuguese translation for TOP 10 2017", "body": "I would like to help with the Portuguese Translation but I don't know where to start?\r\n\r\nThanks", + "summary": "The issue involves a request for assistance with translating content into Brazilian Portuguese, specifically for the TOP 10 2017. The user is seeking guidance on how to begin the translation process. It would be helpful to provide resources or steps for getting started with the translation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/443", @@ -33929,6 +35103,7 @@ "node_id": "MDU6SXNzdWUzNDYxMDQ3MDY=", "title": "A3: Sensitive Data Exposure (Wrong CWE ID and reference)", "body": "In section __\"A3: Sensitive Data Exposure\"__ (page 10) under __References__→__External__ there is wrong _CWE ID_ and corresponding _wrong [link](https://cwe.mitre.org/data/definitions/220.html)_, i.e **CWE-220** should be → **CWE-202** with corresponding [link](https://cwe.mitre.org/data/definitions/202.html). The name is OK.", + "summary": "The issue highlights an incorrect CWE ID listed in the \"A3: Sensitive Data Exposure\" section, where CWE-220 is mistakenly referenced instead of CWE-202. The issue also points out the need to update the corresponding link to the correct definition. 
To address this, the CWE ID and the link should be corrected.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/444", @@ -33944,8 +35119,8 @@ "repository": 1132, "assignees": [], "labels": [ - 230, - 233 + 233, + 230 ] } }, @@ -33958,6 +35133,7 @@ "node_id": "MDU6SXNzdWUzNjYwMjM4OTk=", "title": "A10:2017-Insufficient Logging&Monitoring", "body": "Hi!\r\nI was reading the amazing book OWASP Top 10 2017 and I have found two informations about breach detection.\r\n\r\nThe first one is 200 days:\r\n\r\n![image](https://user-images.githubusercontent.com/4174325/46368866-b2432780-c657-11e8-9344-bf4c5683f2ae.png)\r\n\r\nand another one, 191 days:\r\n\r\n![image](https://user-images.githubusercontent.com/4174325/46368885-c129da00-c657-11e8-9ab2-8d8073df7a27.png)\r\n\r\nCould you think to use 191 days in both places instead of 200 days and 191 days?\r\n\r\nI was a bit confused at first when I saw the two pieces of information.\r\n\r\nCongratulations for the great job in this project!\r\n\r\nI really hope to help you.", + "summary": "The issue discusses the inconsistency in breach detection times mentioned in the OWASP Top 10 2017 book, specifically the use of \"200 days\" and \"191 days\" in different sections. The user suggests standardizing both references to \"191 days\" to avoid confusion. It may be beneficial to review the content and implement this change for clarity.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/449", @@ -33984,6 +35160,7 @@ "node_id": "MDU6SXNzdWUzOTE2MDY0MTg=", "title": "Page numbering in text", "body": "In Section **'Note About Risks'** (page 21 as in the TOC, i.e. page 22) there is a phrase → *(as referenced in the Acknowledgements on page 25)*.\r\nActually that page number is **25**, but in the TOC that page is **24**. I think something should be done about that: start page numbering from the cover page, or refer to pages as they appear in the TOC.", + "summary": "There is an inconsistency in page numbering within the document, specifically in the 'Note About Risks' section where a reference to the Acknowledgements incorrectly states the page number. The actual page number is 25, but the Table of Contents lists it as 24. The issue suggests that a solution should involve either starting page numbering from the cover page or aligning the references with the page numbers as they appear in the TOC. Further clarification or adjustment is needed to resolve this discrepancy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/456", @@ -34012,6 +35189,7 @@ "node_id": "MDU6SXNzdWUzOTc0MjYyNzA=", "title": "Ukrainian translation for TOP 10 2017", "body": "Starting ukrainian translation for OWASP Top 10.\r\nJoin at https://github.com/miguelsuddya/Top10", + "summary": "A new Ukrainian translation for the OWASP Top 10 is being initiated. 
Contributors are encouraged to participate in the translation efforts by joining the project on GitHub.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/459", @@ -34029,8 +35207,8 @@ 198 ], "labels": [ - 229, - 235 + 235, + 229 ] } }, @@ -34043,6 +35221,7 @@ "node_id": "MDU6SXNzdWUzOTgyMjM5NTA=", "title": "Google preview for Top 10 page is wrong", "body": "If you Google \"OWASP Top 10\", the preview pane you get back shows the list for 2010: [Google](https://www.google.com/search?q=owasp+top+10)\r\n\r\nIn the page source for the top 10 page, there is some markup starting with \"For 2010, \" and this is what is reflected in Google, although I don't know why this particular block gets picked out.\r\n\r\nThis should be updated to show 2017.", + "summary": "The issue reports that the Google preview for the \"OWASP Top 10\" search query incorrectly displays the 2010 list instead of the updated 2017 version. The page source contains outdated markup, specifically starting with \"For 2010,\" which is being used in the Google preview. To resolve this, the relevant markup should be updated to reflect the 2017 list.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/461", @@ -34069,6 +35248,7 @@ "node_id": "MDU6SXNzdWU0MjMyNjA4MjY=", "title": "Tiny typo on german translation for Top 10 2017", "body": "> A10:2017-Unzureichendes Logging & Moniting\r\n\r\nShould be fixed by: A10:2017-Unzureichendes Logging & Monitoring", + "summary": "There is a typo in the German translation for \"Top 10 2017,\" where \"Monitoring\" is misspelled as \"Moniting.\" The correction needed is to change it to \"Monitoring.\"", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/467", @@ -34095,6 +35275,7 @@ "node_id": "MDU6SXNzdWU0MjU2NzgxMTY=", "title": "A8:2017", "body": "Under \"Impacts\": \"The impact of deserialization flaws cannot be understated.\"\r\nPretty sure you mean \"overstated.\"", + "summary": "The issue points out a potential error in the wording of a statement regarding the impacts of deserialization flaws, suggesting that \"overstated\" would be a more appropriate term than \"understated.\" It may need to be revised for clarity.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/468", @@ -34121,6 +35302,7 @@ "node_id": "MDU6SXNzdWU0NTUyMjcyMzc=", "title": "A2:2017 ", "body": "\"The prevalence of broken authentication is\r\nwidespread...\"\r\n\r\n>>> widespread is corresponding to level 3 and you wrote level 2 for prevalence which is correspond to \"common\" ", + "summary": "The issue discusses a discrepancy in the classification of the term \"prevalence\" in relation to broken authentication. The term is currently categorized at level 2 (\"common\"), while it should be adjusted to level 3 (\"widespread\") to accurately reflect the severity of the issue. The task involves updating the classification to ensure consistency and accuracy regarding the terminology used.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/470", @@ -34147,6 +35329,7 @@ "node_id": "MDU6SXNzdWU0NTYzNjA3NTk=", "title": "Phetdavan.vc@gmail.com ", "body": "https://github.com/OWASP/Top10/issues/456#issuecomment-448975579", + "summary": "The issue discusses a potential update or clarification needed in the OWASP Top 10 documentation, specifically regarding the categorization and descriptions of certain vulnerabilities. 
It suggests that examples provided may not fully capture the nuances of the vulnerabilities listed, which could lead to misunderstandings among users. To address this, a thorough review of the current content and possible revisions or additions to the examples and descriptions is recommended.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/471", @@ -34173,6 +35356,7 @@ "node_id": "MDU6SXNzdWU0ODQwODAwNjE=", "title": "SQL LIMIT syntax is not an effective control against SQL injection", "body": "[Top 10-2017 A1-Injection](https://www.owasp.org/index.php/Top_10-2017_A1-Injection) says:\r\n\r\n Use LIMIT and other SQL controls within queries to prevent mass disclosure of records in case of SQL injection\r\n\r\nThis statement is problematic in that if an attacker is able to insert syntax into the SQL statement then they can simply bypass the `LIMIT` clause by injecting a comment, stacking queries, etc. This is due to the `LIMIT` statement occurring last in SQL syntax.\r\n\r\nWhile there may be cases where the attacker is limited in syntax and the `LIMIT` statement has some effect it seems rather pointless to recommend this without also recommending things that will lead to detection. This is primary screen real estate spent on a very ineffective control instead of a better control or even addressing one of the other attacks that are also listed on the page, but has no additional information:\r\n\r\n SQL, LDAP, XPath, or NoSQL queries, OS commands, XML parsers, SMTP headers, expression languages, and ORM queries.\r\n\r\nCheers!\r\n", + "summary": "The issue highlights the ineffectiveness of using SQL LIMIT as a safeguard against SQL injection attacks, referencing that attackers can bypass it by injecting comments or using other techniques. It argues that while LIMIT might offer some protection in limited scenarios, it does not address the broader issue of SQL injection effectively. The report suggests that resources should be allocated to more effective controls and detection methods rather than relying on LIMIT. It may be beneficial to reevaluate the recommendations regarding SQL injection defenses and consider integrating more robust security measures.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/474", @@ -34199,6 +35383,7 @@ "node_id": "MDU6SXNzdWU0OTkwNjY2Nzc=", "title": "Migrate OWASP Top 10 content from OWASP wiki to refresh", "body": "The current OWASP Wiki content needs to be cleaned up and translated into the new content format as described in the Leaders meeting at AppSec DC", + "summary": "The issue involves migrating and updating the OWASP Top 10 content from the OWASP wiki to align with a new content format discussed in a recent Leaders meeting. This requires cleaning up the existing content and ensuring it is properly translated into the specified format. The task ahead includes reviewing the current wiki entries and restructuring them as needed.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/476", @@ -34225,6 +35410,7 @@ "node_id": "MDU6SXNzdWU1MDMyMDEzMjI=", "title": "Change A9 to a general software supply chain category", "body": "Since 2013, A9 has been a single risk in a domain where there are hundreds of potential risks, many of which impact security.\r\n\r\nIMO, \"Using components with known vulnerabilities\" is too specific and continues to reinforce a fallacy that this is the only (or worst) of the security threats. It's not. 
A call for data in this category however, may reveal very little insight other than \"Using components with known vulnerabilities\". This is partially due to the limited scope and visibility that SCA tools have. \r\n\r\nThe [Component Analysis](https://www.owasp.org/index.php/Component_Analysis) wiki article contains about a dozen areas of concern.\r\n\r\nIn addition, the [OWASP Software Component Verification Standard](https://github.com/OWASP/Software-Component-Verification-Standard) (SCVS) currently has a list of about 100 activities, processes, and best practices that aim to reduce software supply chain risk. This project is just getting started but may provide some value in attempting to determine how to better categorize A9 in the future. \r\n", + "summary": "The issue discusses the need to change the designation of A9 from a specific risk, \"Using components with known vulnerabilities,\" to a broader category focused on software supply chain risks. The current labeling is considered too narrow and fails to acknowledge the multitude of risks that could affect security. It suggests that the existing scope of Software Composition Analysis (SCA) tools limits the understanding of these risks. The issue references resources like the Component Analysis wiki and the OWASP Software Component Verification Standard (SCVS) to illustrate the breadth of concerns and best practices in this domain. It implies that a reevaluation of A9 is necessary to better reflect the complexities of software supply chain risks. Further exploration of categorization based on these resources may be beneficial.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/477", @@ -34251,6 +35437,7 @@ "node_id": "MDU6SXNzdWU1MTQ0ODg2Nzk=", "title": "New top 10 series about network connection vulnerabilities", "body": "I noticed that OWASP recently released domain specific TOP 10 lists, for example the Top 10 IoT Vulnerabilities list.\r\n\r\nI googled but didn't find something specific for networking connections.\r\n\r\nSome common vulnerabilities I can think of are:\r\n\r\n* Missing end-to-end encryption\r\n* VPN leakage\r\n* Insecure Layer2/3 switch configuration\r\n\r\netc. etc.\r\nA top 10 list about networking would be great.", + "summary": "There is a suggestion for creating a new top 10 list focused on vulnerabilities related to network connections, similar to existing domain-specific lists from OWASP. The submitter has identified several common vulnerabilities, including missing end-to-end encryption, VPN leakage, and insecure Layer 2/3 switch configurations. It is proposed that developing this list could provide valuable insights into networking security issues. Further research and compilation of additional vulnerabilities may be needed to finalize the list.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/479", @@ -34277,6 +35464,7 @@ "node_id": "MDU6SXNzdWU1NDA1NjM3NjE=", "title": "Review recent PRs and commit only minor changes", "body": "", + "summary": "The issue suggests reviewing recent pull requests (PRs) and making only minor adjustments as necessary. It implies that these changes should be minimal and focused on improving the existing contributions without requiring major overhauls. 
A thorough review of the PRs is needed to identify specific areas for these minor edits.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/483", @@ -34307,6 +35495,7 @@ "node_id": "MDU6SXNzdWU1NDA1NjU4MDM=", "title": "Clean up translations and folder structure", "body": "Many of the recent PRs have not followed our layout structures. Please clean this up. ", + "summary": "The issue highlights inconsistencies in the translation files and folder structure resulting from recent pull requests. A cleanup is necessary to align these elements with the established layout guidelines. It is suggested to review the current organization and make the necessary adjustments to ensure compliance with the project's standards.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/485", @@ -34335,6 +35524,7 @@ "node_id": "MDU6SXNzdWU1NDMzMTMzMjg=", "title": "Vietnamese translation", "body": "I'm translating this version into Vietnamese. If someone is interested, please let me know. ", + "summary": "A user is working on translating the project into Vietnamese and has expressed willingness to collaborate with others interested in the translation effort. It may be beneficial to reach out and coordinate with the translator for any assistance or contributions regarding the Vietnamese version.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/488", @@ -34361,6 +35551,7 @@ "node_id": "MDU6SXNzdWU1ODM5MjkyMzY=", "title": "Proposal: Rename \"A4: XML External Entities\" to \"A4: Server-Side Request Forgery\"", "body": "The XML External Entities category seems oddly specific to me. It mostly focuses on a specific attack that is part of a broader and, in practice, more important category, which is Server-Side Request Forgery (SSRF). Most XXE vulnerabilities can be used to conduct an SSRF attack, whereas a lot of other SSRF vulnerabilities cannot be categorized as XXE attacks. SSRF received a ton of attention lately because of the capital one breach: e.g. see https://krebsonsecurity.com/2019/08/what-we-can-learn-from-the-capital-one-hack/ and search for SSRF\r\n\r\nQuotes: \"The type of vulnerability exploited by the intruder in the Capital One hack is a well-known method called a “Server Side Request Forgery” (SSRF) attack\", \"SSRF has become the most serious vulnerability facing organizations that use public clouds, [..]\"\r\n\r\nAs a response to this a lot of Cloud providers started major projects in this space: e.g. [Amazon](https://aws.amazon.com/blogs/security/defense-in-depth-open-firewalls-reverse-proxies-ssrf-vulnerabilities-ec2-instance-metadata-service/).\r\n\r\nA lot of this work and the underlying vulnerabilities cannot be easily categorized using the existing OWASP Top Ten. Thus, I'd like to propose to rename XXE to SSRF. The existing XXE vulnerabilities could still be part of this category, while the new category is broader and covers also other types of SSRF vulnerabilities.\r\n", + "summary": "The issue proposes renaming the \"A4: XML External Entities\" category to \"A4: Server-Side Request Forgery\" (SSRF). The author argues that the current designation is too narrow, as it primarily addresses a specific type of attack that falls under the wider umbrella of SSRF, which has gained significant attention due to recent high-profile breaches. 
The proposal emphasizes that while existing XXE vulnerabilities could still be included under this new category, it would allow for a more comprehensive coverage of various SSRF vulnerabilities. \n\nTo move forward, the community may need to evaluate the implications of this proposed change and assess how it aligns with current cybersecurity trends and vulnerabilities.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/490", @@ -34387,6 +35578,7 @@ "node_id": "MDU6SXNzdWU1OTUxNDAxMTA=", "title": "Broken OWASP URLs ", "body": "Hi team!\r\n\r\nI love the work you have done already with TOP 10. It is one of the most useful tools in the industry ❤️ \r\n\r\nSeems like OWASP has deprecated and re-located some content in the web. The TOP 10 still pointing to the deprecated version in many parts, [like here](https://github.com/OWASP/Top10/blob/master/2017/en/0xa1-injection.md). \r\n\r\nThe fix will be to move from `https://owasp.org/index.php/*` to `https://wiki.owasp.org/index.php/*`.\r\n\r\nIf you agree I can try to build a script and change the references across all the relevant files and folders and submit a PR with the changes.\r\n", + "summary": "The issue highlights that several URLs related to the OWASP TOP 10 have become broken due to the deprecation and relocation of content. It specifically points out that links still direct to outdated resources and suggests updating them from `https://owasp.org/index.php/*` to `https://wiki.owasp.org/index.php/*`. The author offers to create a script to update all relevant references and submit a pull request. \n\nTo address this, a review of all existing links in the TOP 10 documentation is needed to ensure they point to the correct, updated URLs.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/491", @@ -34413,6 +35605,7 @@ "node_id": "MDU6SXNzdWU5OTMwNjI4MTc=", "title": "JWT tokens should be invalidated on the server after logout (A1)", "body": "... there's definitively an explanation needed or a better phrasing. Otherwise people read this and start implement blacklists denylists 🤦🏼\r\n\r\nAnd:\r\n\r\n> Metadata manipulation, such as replaying or tampering with a JSON Web Token (JWT) access control token, or a cookie or hidden field manipulated to elevate privileges or abusing JWT invalidation.\r\n\r\nThis doesn't sound good to me. Never heard that a JWT or a cookie is labeled as metadata? In my world metadata is more often used as data outside a bunch of contained data which e.g. is encrypted. E.g. Encrypted e-mails still needs information from the sender and recipient a least. Same for HTTPS. \r\n\r\n", + "summary": "The issue raises concerns about the need for clear communication regarding the invalidation of JWT tokens on the server after logout. It suggests that the current phrasing might lead to confusion, particularly regarding the implementation of denial lists instead of blacklists. Additionally, there is a critique of the terminology used to describe JWTs and cookies as metadata, arguing that this classification is misleading. 
It may be necessary to revise the language used in the documentation to enhance clarity and accuracy.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/544", @@ -34443,6 +35636,7 @@ "node_id": "MDU6SXNzdWU5OTM4ODg2NzA=", "title": "A07:2021 Identification and Authentication Failure clarification needed", "body": "while translating A07:2021, we felt it was a bit weird on the following description:\r\n\r\nUses plain text, **encrypted,** or weakly hashed passwords (see A3:2017-Sensitive Data Exposure).\r\n\r\nI can understand plain text, weakly hashed password being part of protecting against failure of identification & authentication password, but encrypted password being part of it, it felt a bit weird. If encrypted password is used as is to gain access, that is understandable, but I though we might need better explanation or elaborate it a bit to make it more clear? ", + "summary": "The issue raises a concern regarding the clarity of the description related to A07:2021, specifically the inclusion of \"encrypted passwords\" alongside \"plain text\" and \"weakly hashed passwords\" in the context of identification and authentication failures. The author suggests that while plain text and weakly hashed passwords clearly relate to security failures, the mention of encrypted passwords may require further explanation to clarify their role in access control. It is recommended to provide a more detailed explanation to enhance understanding of how encrypted passwords fit into this category.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/553", @@ -34473,6 +35667,7 @@ "node_id": "I_kwDOA_2m3c475clv", "title": "Link with OWASP Integration Standards", "body": "", + "summary": "The issue suggests incorporating OWASP Integration Standards into the project. It emphasizes the importance of aligning with these standards to enhance security and best practices. Action items may include reviewing current integration methods and updating documentation or implementation strategies to ensure compliance with the OWASP guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/606", @@ -34504,6 +35699,7 @@ "node_id": "I_kwDOA_2m3c48BMJN", "title": "Arranging Translated Materials within 2021 doc repo", "body": "Currently due to the nature of mkdocs, the repo is kind of a mess with all the translated materials in one place, can we search for ways or alternatives to better structure the the doc folder for easier access to the materials. @sslHello ", + "summary": "The issue discusses the need for improved organization of translated materials within the 2021 documentation repository, as the current setup is chaotic due to mkdocs. It suggests exploring options or alternatives to better structure the documentation folder for easier access. A potential next step would be to evaluate different folder structures or tagging systems to enhance navigability.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/620", @@ -34519,8 +35715,8 @@ "repository": 1132, "assignees": [], "labels": [ - 229, - 236 + 236, + 229 ] } }, @@ -34533,6 +35729,7 @@ "node_id": "I_kwDOA_2m3c48B56t", "title": "lang=it @ owasp.org", "body": "che c...? ;-) \r\n \r\n![image](https://user-images.githubusercontent.com/8036727/134778100-e7405b00-47a5-49f6-80a5-f94b093477b7.png)\r\n\r\n", + "summary": "The issue presents a screenshot that is unclear and lacks context, prompting a request for clarification. 
To address this, it would be helpful to provide additional information or context regarding the content of the image.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/621", @@ -34561,6 +35758,7 @@ "node_id": "I_kwDOA_2m3c48F-35", "title": "Include API end points and API WebHooks in A7", "body": "In the case of communication, because of with all the automations happening out there, there is an increase in application-to-application communication (as supposed to user-to-application). \r\nWith this, there is an increase of API endpoints, in particular API webhooks listeners, which are exposed to the web without authentication. (API vs webhooks https://agilitycms.com/resources/posts/-api-vs-webhooks-what-s-the-difference )\r\n\r\nWe should have in A7 something to make aware of this. \r\nFor example, in Description, after \r\n\"There may be authentication weaknesses if the application:\r\nto have something on the lines : \r\n- \" Permits the communication with another application in an unauthenticated manner\"\r\n\r\nIn Prevention,ca have something on the lines of \r\n- \" to ensure API endpoints, including API webhooks, do not expose sensitive data without authentication\"\r\n", + "summary": "The issue discusses the need to enhance section A7 to address the rise in application-to-application communication and the associated risks of unauthenticated API endpoints and webhooks. It suggests adding specific language in the Description section to highlight potential authentication weaknesses and proposing preventive measures to ensure these endpoints do not expose sensitive data. To resolve this, updates should be made to both the Description and Prevention sections of A7 to reflect these concerns.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/626", @@ -34587,6 +35785,7 @@ "node_id": "I_kwDOA_2m3c48GBgW", "title": "Establish a numbering/reference standard", "body": "Similar to what's been done for ASVS and WSTG:\r\n- https://owasp.org/www-project-application-security-verification-standard/#how-to-reference-asvs-requirements\r\n- https://owasp.org/www-project-web-security-testing-guide/#how-to-reference-wstg-scenarios", + "summary": "The issue suggests the creation of a numbering or reference standard for a project, akin to the systems in place for the Application Security Verification Standard (ASVS) and the Web Security Testing Guide (WSTG). The author provides links to the reference methods used in ASVS and WSTG as examples. A potential action could involve developing a similar framework for referencing requirements or scenarios in the current project.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/627", @@ -34615,6 +35814,7 @@ "node_id": "I_kwDOA_2m3c48GFy0", "title": "If XSS is in A3: Injection, should we also include the controls to prevent XSS ? ", "body": "If XSS is now included in A3: Injection, should we also include in the \"prevention section the controls which prevent XSS , something like :\r\n_\"contextual encoding of the output to neutralize the characters which can facilitate injection\"_ \r\n\r\n", + "summary": "The issue raises a question about whether to include controls for preventing XSS in the A3: Injection category. It suggests that if XSS is classified under A3, relevant preventive measures, such as contextual encoding of output to mitigate injection risks, should also be added to the prevention section. 
To address this, a review of the current documentation may be necessary to incorporate appropriate controls for XSS prevention.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/628", @@ -34643,6 +35843,7 @@ "node_id": "I_kwDOA_2m3c48ONqi", "title": "In the next steps section, the second paragraph says \"four issues\", but then it only describes three", "body": "In the next steps section, the second paragraph says \"four issues\", but then it only describes three:\r\n+Code Quality issues\r\n+Denial of Service\r\n+Memory Management Errors", + "summary": "The issue points out a discrepancy in the \"next steps\" section where it mentions \"four issues\" but only lists three: Code Quality issues, Denial of Service, and Memory Management Errors. To resolve this, the missing fourth issue needs to be identified and included in the description.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/630", @@ -34673,6 +35874,7 @@ "node_id": "I_kwDOA_2m3c48imIe", "title": "Clarifying title", "body": "This project was renamed to OWASP Top 10 Risks in 2010. It is now often referred to \"Top 10\" alone.\r\n\r\nI think the next release would be a great opportunity to rename it to something more significant.\r\n\r\n## Top 10 \"Risks\"\r\n\r\nRisks is the **likelihood of a security threat to happen**. Therefore an SQL injection is not a risk. The possibility for private information being stolen or the possibility of some content being modified by an attacker are risks examples because they demonstrate a **threat**.\r\n\r\nDoes an XSS in a production system vs development system pose the same risk? It is the same vulnerability. It is the same software. The impact will likely be more important on the production environment.\r\n\r\n**Definitions:**\r\n- https://csrc.nist.gov/glossary/term/risk\r\n- https://www.iso.org/obp/ui#iso:std:iso:28004:-1:ed-1:v1:en:term:3.1\r\n- https://www.merriam-webster.com/dictionary/risk\r\n- https://dictionary.cambridge.org/fr/dictionnaire/anglais/risk\r\n\r\nIt is also important to link up with other fields of cyber security notably threat analysis and risk management.\r\n\r\n## Better alternatives\r\n\r\n - Top 10 Weaknesses\r\n - Top 10 Vulnerabilities\r\n - Top 10 Classes of vulnerabilities\r\n - Top 10 Weaknesses categories\r\n\r\n\r\n..What do you guys think?", + "summary": "The issue discusses the need to reconsider the naming convention of the project, currently known as \"Top 10 Risks,\" suggesting that the term \"Risks\" may not accurately represent the content. The author argues that \"risk\" should reflect the likelihood of security threats, providing examples to clarify this distinction. They propose alternative names such as \"Top 10 Weaknesses,\" \"Top 10 Vulnerabilities,\" and others that may better align with the project's focus. The author encourages input from the community on these suggestions. 
\n\nTo move forward, the project maintainers should consider these proposed names and gather feedback from the community.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/642", @@ -34701,6 +35903,7 @@ "node_id": "I_kwDOA_2m3c48tDGc", "title": "Edit Link in Template results in 404", "body": "If I use the pencil Icon shown below in the picture\r\n![image](https://user-images.githubusercontent.com/20317763/136222517-f35a03fa-709f-4224-b9ba-425ed8b95bc3.png)\r\nI will be send to this URL causing a 404\r\nhttps://github.com/OWASP/Top10/edit/master/docs/index.md\r\n\r\nLooking at the structure I assume this link \r\nhttps://github.com/OWASP/Top10/edit/master/2021/docs/index.md\r\nwas the targeted one. I assume a incorrect information inside of the Page generation", + "summary": "The issue reported involves a 404 error encountered when clicking the edit link (pencil icon) in a template. The current URL leads to a non-existent page, while it is suggested that the correct URL should reference a different path. It appears that there is incorrect information in the page generation that needs to be addressed to resolve this issue. Updating the link structure may be necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/643", @@ -34731,6 +35934,7 @@ "node_id": "I_kwDOA_2m3c4994HJ", "title": "Where is the new one-page infographic mentioned on the project page?", "body": "The project page https://owasp.org/Top10/ mentions:\r\n\r\n> The OWASP Top 10 2021 is all-new, with a new graphic design and an available one-page infographic you can print or obtain from our home page.\r\n\r\nBut the mentioned Infographic isn't linked and some searches did not turn up anything.\r\n\r\nIs this still missing or just missing the proper link?\r\nWhere can it be found?\r\n", + "summary": "The issue highlights the absence of a link to the one-page infographic referenced on the OWASP Top 10 project page. Despite mentions of its availability, users are unable to locate the infographic through searches or directly on the page. To resolve this, it may be necessary to verify whether the infographic is indeed missing or if it simply lacks a proper link, and to ensure that it is made accessible for users.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/660", @@ -34761,6 +35965,7 @@ "node_id": "I_kwDOA_2m3c4-ggRr", "title": "A mistake at A05:2021 - Security Misconfiguration", "body": "Hi, \r\nPlease see the description under Overview in A05:2021 - Security Misconfiguration. Do you want to say 208K CVEs? I think there are only 924 CWEs now. So did you accidentally make a mistake? \r\nLink: https://github.com/OWASP/Top10/blob/master/2021/docs/A05_2021-Security_Misconfiguration.md\r\nThanks,", + "summary": "The issue raises a concern regarding a potential error in the description under the Overview section of A05:2021 - Security Misconfiguration. It questions whether the mention of \"208K CVEs\" is correct, suggesting it might be a mistake since there are only 924 CWEs currently documented. 
To address this, a review of the content for accuracy is recommended.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/663", @@ -34787,6 +35992,7 @@ "node_id": "I_kwDOA_2m3c4_o6HO", "title": "Fix link in A5:2021-Security Misconfiguration", "body": "Thanks a lot for updating TOP 10 to 2021!\r\nThe link to threat \"A05:2021-Security Misconfiguration\" is self-referring, so you might want to correct it.", + "summary": "The issue highlights a problem with a self-referential link in the \"A5:2021-Security Misconfiguration\" section. It suggests that the link needs to be corrected to ensure proper navigation. The necessary action is to update the link to point to the correct resource.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/673", @@ -34813,6 +36019,7 @@ "node_id": "I_kwDOA_2m3c5BBp5P", "title": "Fix link in A04:Insecure Design", "body": "\"NIST – Guidelines on Minimum Standards for Developer Verification of Software\" in \"Insecure Design page\" is broken.", + "summary": "The issue reports a broken link to \"NIST – Guidelines on Minimum Standards for Developer Verification of Software\" on the \"Insecure Design\" page. To resolve this, the link needs to be updated or fixed to ensure it directs users to the correct resource.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/677", @@ -34839,6 +36046,7 @@ "node_id": "I_kwDOA_2m3c5ESwh3", "title": "Top 10 / 2021 / About OWASP - Google groups link is broken - fix link", "body": "In this page:\r\nhttps://github.com/OWASP/Top10/blob/master/2021/docs/A00-about-owasp.md\r\nThe 'Google Groups' text links to:\r\nhttps://github.com/OWASP/Top10/blob/master/2021/docs/TBA\r\nwhich lands on a 404", + "summary": "The issue reports a broken link in the OWASP Top 10 documentation for 2021. The 'Google Groups' text currently points to a URL that results in a 404 error. To resolve this, the link should be updated to direct users to the correct and active resource.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/691", @@ -34867,6 +36075,7 @@ "node_id": "I_kwDOA_2m3c5GEE5E", "title": "CWE-73 is not a part of A03:2021", "body": "Regarding the:\r\nhttps://owasp.org/Top10/A03_2021-Injection/#overview\r\n\r\nCWE-73 is mentioned as a notable CWE, but\r\n* when you browse CWE-73 on [CWE in Memberships section](https://cwe.mitre.org/data/definitions/73.html) you can see it is related to **A04:2021 Insecure Design**, not A03:2021\r\n* is not mentioned in a [list of mapped CWEs](https://owasp.org/Top10/A03_2021-Injection/#list-of-mapped-cwes)\r\n\r\nI'm not in position to discover what could it be mistaken with. 
My suggestion is to remove the third from notables or replace it with \r\n* [CWE-90 Improper Neutralization of Special Elements used in an LDAP Query ('LDAP Injection')](https://cwe.mitre.org/data/definitions/90.html), or\r\n* [CWE-77 Improper Neutralization of Special Elements used in a Command ('Command Injection')](https://cwe.mitre.org/data/definitions/77.html), or\r\n* [CWE-91 XML Injection (aka Blind XPath Injection)](https://cwe.mitre.org/data/definitions/91.html)\r\n(based on [references section](https://owasp.org/Top10/A03_2021-Injection/#references))\r\n\r\nWhen you come to agreement on what needs to be changed, I can prepare a PR for that.\r\n\r\nStay Secure!", + "summary": "The issue highlights that CWE-73 is incorrectly associated with A03:2021 Injection in the OWASP Top 10, as it is actually related to A04:2021 Insecure Design. The author suggests removing CWE-73 from the notable CWEs in A03:2021 or replacing it with other CWEs like CWE-90, CWE-77, or CWE-91, which are more relevant to injection vulnerabilities. It is recommended to review the current associations and make the necessary adjustments, and a pull request can be prepared once a decision is reached.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/694", @@ -34895,6 +36104,7 @@ "node_id": "I_kwDOA_2m3c5G-V3A", "title": "A05:2021-Security Misconfiguration: Average incidence percentage mismatches from index.md to A05_2021-Security_Misconfiguration.md", "body": "Hi\r\n\r\nIn the index.md A05:2021-Security Misconfiguration has this text:\r\n\r\n`[A05:2021-Security Misconfiguration](https://github.com/OWASP/Top10/blob/master/2021/docs/A05_2021-Security_Misconfiguration.md) moves up from https://github.com/OWASP/Top10/issues/6 in the previous edition; 90% of applications were tested for some form of misconfiguration, with an average incidence rate of `**4.5%**`, and over 208k occurrences of CWEs mapped to this risk category. With more shifts into highly configurable software, it's not surprising to see this category move up. The former category for A4:2017-XML External Entities (XXE) is now part of this risk category.`\r\n\r\nIn A05_2021-Security_Misconfiguration.md it saids 4.% \r\n\r\n`## Overview`\r\n\r\n`Moving up from #6 in the previous edition, 90% of applications were\r\ntested for some form of misconfiguration, with an average incidence rate of` **4.%,** \r\n\r\nDue to the dot, I think that the correct number is 4.5. \r\nI Appreciate if this can be confirmed. \r\n\r\nThank you,\r\nGerardo Canedo", + "summary": "The issue highlights a discrepancy in the average incidence percentage for A05:2021-Security Misconfiguration between the index.md and A05_2021-Security_Misconfiguration.md files. The index states the incidence rate as 4.5%, while the other document lists it as 4.%. It suggests that the correct figure should be 4.5%. Confirmation of this number is requested. 
\n\nTo resolve this, the average incidence percentage should be verified and corrected in the documentation where necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/697", @@ -34923,6 +36133,7 @@ "node_id": "I_kwDOA_2m3c5IrAU-", "title": "A2:2021 contains CWE-259 in overview, however is member of A7:2021", "body": "Hi\r\n\r\nI was surprised to read A2:2021 where [*CWE-259: Use of Hard-coded Password* is mentioned as a notable CWE](https://github.com/OWASP/Top10/blob/fb884b3e8a77c65e61824e99ac62ce456ae20628/2021/docs/A02_2021-Cryptographic_Failures.md?plain=1#L15) in the overview, however CWE-259 does not appear in the [*List of Mapped CWEs*](https://github.com/OWASP/Top10/blob/fb884b3e8a77c65e61824e99ac62ce456ae20628/2021/docs/A02_2021-Cryptographic_Failures.md?plain=1#L171) section.\r\n\r\nIt turns out that [CWE-259 is a member of A7:2021](https://github.com/OWASP/Top10/blob/fb884b3e8a77c65e61824e99ac62ce456ae20628/2021/docs/A07_2021-Identification_and_Authentication_Failures.md?plain=1#L131).\r\n\r\nI feel like either the membership of CWE-259 should change from A7:2021 to A2:2021, or the overview text of A2:2021 should be changed to not include CWE-259.\r\n\r\nI'm really sorry if I'm missing something obvious. I was just confused by this discrepancy and I'd be happy to help correct it.", + "summary": "The issue highlights a discrepancy in the OWASP Top 10 documentation where CWE-259 (Use of Hard-coded Password) is mentioned in the overview of A2:2021 (Cryptographic Failures) but is not listed in the mapped CWEs for that section. Instead, CWE-259 is categorized under A7:2021 (Identification and Authentication Failures). The suggestion is to either reassign CWE-259 to A2:2021 or modify the overview of A2:2021 to exclude CWE-259. 
A review and clarification of the categorization would be beneficial to resolve this confusion.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/717", @@ -34953,6 +36164,7 @@ "node_id": "I_kwDOA_2m3c5KJbpU", "title": "A04:2021 – Insecure Design: Easily Phished Communications?", "body": "[Here, halfway down the page under heading _Example Attack Scenarios_](https://owasp.org/Top10/A04_2021-Insecure_Design/) are 3 scenarios that would fall into the _Insecure Design_ category.\r\n\r\nI've received Phishing training at work and the basic idea is to avoid clicking links in emails, especially if they lead to a log in page where you'd put in credentials.\r\n\r\nHow about on the other end of it, if you have a big customer base who might log in to your application, it might be the most user-friendly workflow to send them an email with a link to have them log in, as they might want to do in response to getting notified about a problem or news regarding their user account.\r\n\r\nIf the application routinely sends out legitimate emails with log in links, I'd speculate it might be 10-100 times more likely to obtain access to an account during a phishing campaign.\r\n\r\nI can recall some cases where a company will put out language like, \"we will never ask for your login credentials via email.\" I'm not sure if that's just a good practice or indication that there is some understanding, that there could be a problem with _legitimate_ communications that appear to possibly be phishing attempts.\r\n\r\nSo I wonder if this kind of design, routine emails, or texts, with login links and a call to action to click and sign on, would that be considered _Insecure Design_ under the OWASP/Top10 categorization?", + "summary": "The issue discusses the potential risks associated with sending routine emails that include login links to users, especially in the context of phishing. It highlights that while these communications are intended to be user-friendly, they could unintentionally increase the likelihood of successful phishing attacks, as users might be conditioned to trust such emails. The author questions whether this practice falls under the category of \"Insecure Design\" as defined by OWASP's Top 10. \n\nTo address the concern, it may be beneficial to evaluate the current communication design and consider implementing safer alternatives, such as using unique, time-limited tokens for login links or providing clear warnings about phishing attempts in communications.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/720", @@ -34975,10 +36187,11 @@ "pk": 1229, "fields": { "nest_created_at": "2024-09-11T21:29:19.981Z", - "nest_updated_at": "2024-09-12T02:29:42.588Z", + "nest_updated_at": "2024-09-13T16:27:22.786Z", "node_id": "I_kwDOA_2m3c5L4aw_", "title": "Broken link on Insecure Design page", "body": "The link is on https://owasp.org/Top10/A04_2021-Insecure_Design/\r\nThe link says \"OWASP Cheat Sheet: Secure Design Principles\" and takes me to \r\n(https://owasp.org/Top10/A04_2021-Insecure_Design/Coming%20Soon)\r\n\r\nI realize that it takes me to a page that isn't done yet, but it doesn't even say \"Coming Soon.\" It has a few black rectangles, some indecipherable graphics, some unstyled html, before ending with a 404 message.\r\n\r\nGood luck.", + "summary": "There is a broken link on the Insecure Design page of the OWASP Top 10 that points to a non-functional page. 
The link labeled \"OWASP Cheat Sheet: Secure Design Principles\" redirects to a page that leads to a 404 error without any indication that the content is under development. It would be beneficial to either fix the link or update the destination page to clearly communicate its status.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/723", @@ -35005,6 +36218,7 @@ "node_id": "I_kwDOA_2m3c5NdNpC", "title": "Misleading description in A02:2021, should be moved to A07:2021", "body": "In [A02:2021 - Cryptographic Failures](https://owasp.org/Top10/A02_2021-Cryptographic_Failures/), under the Description section, it states:\r\n\r\n- Is the received server certificate and the trust chain properly validated?\r\n\r\nI believe that this statement is in the wrong OT10 item should be (re)moved.\r\n\r\nIf you look at the corresponding CWE, this is primarily a case of [CWE-296: Improper Following of a Certificate's Chain of Trust](https://cwe.mitre.org/data/definitions/296.html). It has little, if anything, to do with a cryptographic failure, but rather it is an _authentication_ failure as CWE-296 makes obvious if you follow the CWE chain to its parent CWE-295.\r\n\r\nI believe (and I think MITRE would agree) that this bullet item that I referenced is an _authentication_ failure. specifically, it is a failure of properly authenticating the host you are intending to connect to over a TLS connection. Indeed, I believe a better fit for this statement would be to move it [A07:2021](https://owasp.org/Top10/A07_2021-Identification_and_Authentication_Failures/).\r\n", + "summary": "The issue highlights a misleading description in the A02:2021 - Cryptographic Failures section of the OWASP Top 10 list. The specific statement about validating the received server certificate and its trust chain is argued to be inaccurately categorized as a cryptographic failure. Instead, it is suggested that this statement pertains to authentication failures, aligning more closely with CWE-296 and its parent category, CWE-295. The recommendation is to move this bullet point to A07:2021 - Identification and Authentication Failures for a more accurate classification. \n\nTo address this, the description in A02:2021 should be reviewed and potentially revised or relocated to ensure proper categorization.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/724", @@ -35033,6 +36247,7 @@ "node_id": "I_kwDOA_2m3c5ThI3L", "title": "OWASP top 10 and zero-day-attack", "body": "Hello everyone\r\n\r\nIs OWASP Top 10 methodology useful for zero-day attacks?\r\n\r\n", + "summary": "The issue raises a question about the applicability of the OWASP Top 10 methodology in the context of zero-day attacks. It suggests a need to explore whether the principles outlined in the OWASP Top 10 can effectively address or mitigate the risks associated with these types of vulnerabilities. To move forward, a discussion or analysis on the relationship between the OWASP Top 10 and zero-day attack prevention strategies may be necessary.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/729", @@ -35059,6 +36274,7 @@ "node_id": "I_kwDOA_2m3c5aO-GH", "title": "Hindi translation of Top10", "body": "Currently working on creating Hindi translation of TOP 10\r\n", + "summary": "The issue involves the development of a Hindi translation for the \"Top 10\" content. It requires focused efforts to complete the translation. 
Collaboration or contributions from others may be beneficial to expedite this process.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/734", @@ -35085,6 +36301,7 @@ "node_id": "I_kwDOA_2m3c5bLRie", "title": "OWASP Top10", "body": "well i install this git but didn't worked.\r\ni cant open with bash. help", + "summary": "The user is experiencing issues with opening a GitHub project related to OWASP Top 10 using bash. They mention that the installation did not work as expected. It would be helpful to provide troubleshooting steps, such as checking the installation process or ensuring that bash is properly configured to run the project.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/737", @@ -35111,6 +36328,7 @@ "node_id": "I_kwDOA_2m3c5ejvin", "title": "Minor typo A01: Broken access control > Example attack scenario #2", "body": "A01: Broken Access Control\r\nSection: Example Attack Scenarios\r\n\r\nScenario # 2 reads: An attacker simply forces browses to target URLs. Admin rights are required for access to the admin page.\r\n\r\n^^(copied and pasted)\r\n\r\nI believe this should be either \"force\" (no 's') browses OR forces \"browsers\" (add 'r'), depending on which one was intended.", + "summary": "The issue identifies a minor typo in the \"Example Attack Scenarios\" section under A01: Broken Access Control. It suggests that the phrase \"forces browses\" should be corrected to either \"force browses\" or \"forces browsers,\" clarifying the intended meaning. The suggested action is to update the text to resolve the typo.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/739", @@ -35137,6 +36355,7 @@ "node_id": "I_kwDOA_2m3c5fWveI", "title": "pt_BR: Translate A11 and fix some links", "body": "Hi,\r\n1) Translate the last missing file A11 to pt_BT, please.\r\n2) MKdocs reports the following warnings:\r\n* WARNING - Documentation file 'A05_2021-Security_Misconfiguration.pt_BR.md' contains a link to 'A06_2021 Vulnerable_and_Outdated_Components.md' which is not found in the documentation files.\r\n* WARNING - Documentation file 'A10_2021-Server-Side_Request_Forgery_(SSRF).pt_BR.md' contains a link to 'A09_2021-Security_Logging_and_Monitoring_Failures.pt-BR.md' which is not found in the documentation files.\r\n\r\nThanks and cheers\r\nTorsten", + "summary": "The issue requests the translation of the last missing file, A11, into Brazilian Portuguese (pt_BR). Additionally, it highlights warnings from MKdocs regarding broken links in the documentation files A05 and A10, which reference other files that cannot be found. It is suggested to both complete the translation and address the broken links in the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/754", @@ -35163,6 +36382,7 @@ "node_id": "I_kwDOA_2m3c5n1UL8", "title": "(Not so) Secret Formular", "body": "Hello OWASP Team,\r\nI am writing my bachelor thesis about the owasp top 10 and have problems to understand the calculation for the rankings.\r\nI saw the calc sheet from [this](https://www.youtube.com/live/REHMOYriUFQ?feature=share&t=3098) presentation but if I try to calculate the score I dont get the results. \r\n\r\nCould you provide the (excel?) 
sheet for more transparency or explain how the calculations is exaclty done for one example?\r\n\r\n", + "summary": "The issue discusses difficulties in understanding the calculation of rankings for the OWASP Top 10 project, specifically in relation to a spreadsheet used in a presentation. The user requests either the Excel sheet for clarity or a detailed explanation of the calculation process using an example. To address the issue, providing the requested spreadsheet or a step-by-step calculation guide would be beneficial.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/765", @@ -35189,6 +36409,7 @@ "node_id": "I_kwDOA_2m3c5zFdS6", "title": "Missing links in some languages", "body": "For example, on the [A06:2021 – Vulnerable and Outdated Components page](https://owasp.org/Top10/A06_2021-Vulnerable_and_Outdated_Components/) in the French language, the links that redirect to the corresponding articles should be added.\r\n\r\nThis\r\n\r\n![example1](https://github.com/OWASP/Top10/assets/132226893/fe501615-bf5c-4812-8c43-414d39be03d3)\r\n![example2](https://github.com/OWASP/Top10/assets/132226893/92c6e2f9-6ed2-4ab1-8cc9-30921cf330f8)\r\n\r\nShould look like this\r\n\r\n![example3](https://github.com/OWASP/Top10/assets/132226893/827e9fdd-01b7-41c2-906e-f060ea65b38a)\r\n![example4](https://github.com/OWASP/Top10/assets/132226893/03eded1c-b151-48fa-91e6-ffca43ed3e94)\r\n\r\nAnd that happens in some languages, I don't know if it also happens in the different pages within the top 10 (A01, A02, A03, A04, etc.).", + "summary": "The issue highlights that certain language versions of the OWASP Top 10 pages, specifically the French version of the A06:2021 – Vulnerable and Outdated Components page, are missing links that should redirect to corresponding articles. The issue includes examples of how the current state looks and how it should appear with the links added. Additionally, it raises the possibility that similar link issues may exist on other pages within the top 10 (A01, A02, A03, A04, etc.). \n\nTo address this, the relevant links need to be added to the affected language pages.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/775", @@ -35215,6 +36436,7 @@ "node_id": "I_kwDOA_2m3c55uyD3", "title": "OWASP - AI and senstivite data handling", "body": "AI applications often require large datasets for training, which may include sensitive personal information. Unauthorized access or misuse of this data can result in privacy breaches.", + "summary": "The issue highlights concerns regarding the handling of sensitive personal data in AI applications, particularly during the training phase where large datasets are used. It emphasizes the risks of unauthorized access and misuse, which can lead to privacy violations. To address this, it may be necessary to implement stricter data protection measures and guidelines for managing sensitive information in AI development.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/778", @@ -35241,6 +36463,7 @@ "node_id": "I_kwDOA_2m3c553PPn", "title": "Comparison to \"2023 CWE Top 10 KEV Weaknesses\"", "body": "Can we include a comparison to the [2023 CWE Top 10 KEV Weaknesses](https://twitter.com/CweCapec/status/1735331969288794343) within the next release please?", + "summary": "The issue requests the inclusion of a comparison to the 2023 CWE Top 10 KEV Weaknesses in the next release. 
It suggests that this addition could enhance the content, indicating that relevant adjustments or updates to existing materials may be necessary to accommodate this comparison.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/779", @@ -35267,6 +36490,7 @@ "node_id": "I_kwDOA_2m3c59TX_0", "title": "Wrong \"Factors\" in the Italian version of A06:2021 – Vulnerable and Outdated Components", "body": "The values inside the \"Factors\" table of A06_2021 are wrong (some definitions are swapped):\r\nhttps://owasp.org/Top10/it/A06_2021-Vulnerable_and_Outdated_Components/\r\n\r\n![Screenshot_20240126_142913](https://github.com/OWASP/Top10/assets/9741596/277cef4f-be6d-4283-8d4b-777df97a2f61)\r\n\r\nIn details:\r\n- 51.78% should be \"Copertura Max\" instead of \"Sfruttabilità pesata\"\r\n- 22.47% should be \"Copertura Media\" instead of \"Impatto medio\"\r\n- 5.0 should be \"Sfruttabilità media pesata\" instead of \"Copertura Max\"\r\n- 5.0 should be \"Impatto medio pesato\" instead of \"Copertura media\"\r\n", + "summary": "The issue reports incorrect values in the \"Factors\" table of the Italian version of A06:2021 – Vulnerable and Outdated Components. Specifically, several definitions are swapped, leading to misrepresented percentages. The affected values and their correct labels are outlined, indicating a need for correction in the translation to ensure accurate information is presented. To resolve this, the table entries should be updated according to the specified corrections.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/780", @@ -35293,6 +36517,7 @@ "node_id": "I_kwDOA_2m3c6HQZAx", "title": "Remove example about default credentials from A05 2021", "body": "As example for security misconfiguration is listed \"Default accounts and their passwords are still enabled and unchanged\". However, this is part of A07, specifically CWE-1392 Use of Default Credentials.\r\n\r\nI suggest to remove this line:\r\nhttps://github.com/OWASP/Top10/blob/7c7288f5d7a4222c44b2df8fa8799ad1fb1a43da/2021/docs/A05_2021-Security_Misconfiguration.md?plain=1#L27", + "summary": "The issue suggests removing a specific line from the A05 2021 document that mentions \"Default accounts and their passwords are still enabled and unchanged\" as an example of security misconfiguration. The author argues that this example actually belongs under A07, which addresses the use of default credentials. It is recommended to update the document by removing the highlighted line to ensure accurate categorization.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/783", @@ -35319,6 +36544,7 @@ "node_id": "I_kwDOA_2m3c6H1ZTo", "title": "About OWASP 2021 - License and acronym update", "body": "In the About OWASP of the 2021 version:\r\n\r\n- line 3: _The Open Web Application Security Project (OWASP)_ should be updated to _The Open **Worldwide** Application Security Project (OWASP)_\r\n- line 35: License year should be updated\r\n\r\nLicense year is inconsistent across translations.", + "summary": "The issue highlights the need to update the name of OWASP in the 2021 documentation from \"Open Web Application Security Project\" to \"Open Worldwide Application Security Project.\" Additionally, the license year is inconsistent and requires an update across various translations. 
It is suggested to ensure uniformity in the license year throughout all documentation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Top10/issues/784", @@ -35345,6 +36571,7 @@ "node_id": "MDU6SXNzdWUzNjM0MzMwNTk=", "title": "Merge and automate!", "body": "- [ ] - Let's redistribute all apps to their own repository and setup basic pipeline, same holds for the crackmes\r\n- [x] - Let's add the Kottlin app\r\n- [ ] - Let's automate every build with Travis (using what is there in https://github.com/commjoen/uncrackable_app)\r\n- [ ] - Let's move it to be part of the MSTG (and create links to it)\r\n- [ ] - Let's remove all binaries from the MSTG, including the write-ups and put those here.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/MASTG-Hacking-Playground/issues/4", @@ -35373,6 +36600,7 @@ "node_id": "MDU6SXNzdWU1MDE1MDY3NTI=", "title": "include social items", "body": "Please add:\r\n\r\n- [X] Code of conduct (similar to mstg)\r\n- [x] Contributing.md\r\n- [x] PR template\r\n- [ ] instructions on release communication steps.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/MASTG-Hacking-Playground/issues/15", @@ -35399,6 +36627,7 @@ "node_id": "MDU6SXNzdWU1MjkxNDI0MDY=", "title": "Docker Hub", "body": "Put a container at docker hub & make sure it does not run as root for Ruby on Rails API", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/MASTG-Hacking-Playground/issues/18", @@ -35425,6 +36654,7 @@ "node_id": "MDU6SXNzdWU1NDY1NjY2NTI=", "title": "Test case for Instant Apps", "body": "See https://github.com/OWASP/owasp-mstg/issues/1625", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/MASTG-Hacking-Playground/issues/21", @@ -35451,6 +36681,7 @@ "node_id": "MDU6SXNzdWU2NTcxMTA0OTY=", "title": "AndroidKotlin: releases and Android Studio built apk behave differently ", "body": "AndroidKotlin: releases and Android Studio built apk behave differently in Nexus5X 7.1.1 \r\n\r\n\r\n[https://github.com/OWASP/MSTG-Hacking-Playground/releases/download/1.0/MSTG-Android-Kotlin.apk](url) \r\ndoes not show the Login or Register Activity. \r\n\r\nBut the Android Studio built version is showing a login and register Activity. \r\n\r\n\r\nAlso for the releases apk\r\nSharedPreferencesImplementation does not work as expected. An xml file is not present. 
\r\n\r\n\r\nPlease let me know if more information is needed.\r\n\r\n\r\n\r\nThanks,\r\nTsubasa", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/MASTG-Hacking-Playground/issues/29", @@ -35477,6 +36708,7 @@ "node_id": "MDU6SXNzdWU2NjE2MzEyNDg=", "title": "iOS Swift App: loading a file that doesn't exist", "body": "## Project\r\n\r\niOS Swift App(self build from current master brunch)\r\n\r\n## Question\r\n\r\n[At this line](https://github.com/OWASP/MSTG-Hacking-Playground/blob/39c4db7bf721dd73125e330ac9dc64c336838179/iOS/MSTG-JWT/MSTG-JWT/WebViewController.swift#L43), WebViewController loads \"test.html\" but I couldn't find the file in this repository.\r\nWhich files should be loaded?\r\n\r\nThanks,", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/MASTG-Hacking-Playground/issues/30", @@ -35503,6 +36735,7 @@ "node_id": "MDU6SXNzdWU3MjUyNDUyNzA=", "title": "raili-api: docker image doesn", "body": "Docker image doesn't work and fails with the following error.\r\n\r\nCommand to create Docker image: \r\n```\r\n❯ pwd\r\n/Users/user/MSTG-Hacking-Playground/Serverside/rails-api-original\r\n❯ docker build -t mstg-hacking-playground-rail-api .\r\n```\r\nCommand to run docker container: \r\n\r\n```\r\n❯ docker run --name mstg-rail-api -p 80:3000 mstg-hacking-playground-rail-api\r\n'/' is not writable.\r\nBundler will use `/tmp/bundler20201020-1-1husepz1' as your home directory temporarily.\r\n=> Booting Puma\r\n=> Rails 5.1.4 application starting in development\r\n=> Run `rails server -h` for more startup options\r\nPuma starting in single mode...\r\n* Version 3.11.0 (ruby 2.4.6-p354), codename: Love Song\r\n* Min threads: 5, max threads: 5\r\n* Environment: development\r\n* Listening on tcp://0.0.0.0:3000\r\nUse Ctrl-C to stop\r\nStarted POST \"/auth/login\" for 172.17.0.1 at 2020-10-20 06:22:23 +0000\r\n\r\nActiveRecord::PendingMigrationError (\r\n\r\nMigrations are pending. 
To resolve this issue, run:\r\n\r\n bin/rails db:migrate RAILS_ENV=development\r\n\r\n):\r\n\r\nactiverecord (5.1.4) lib/active_record/migration.rb:576:in 'check_pending!'\r\nactiverecord (5.1.4) lib/active_record/migration.rb:553:in 'call'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/callbacks.rb:26:in 'block in call'\r\nactivesupport (5.1.4) lib/active_support/callbacks.rb:97:in 'run_callbacks'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/callbacks.rb:24:in 'call'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/executor.rb:12:in 'call'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/debug_exceptions.rb:59:in 'call'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/show_exceptions.rb:31:in 'call'\r\nrailties (5.1.4) lib/rails/rack/logger.rb:36:in 'call_app'\r\nrailties (5.1.4) lib/rails/rack/logger.rb:24:in 'block in call'\r\nactivesupport (5.1.4) lib/active_support/tagged_logging.rb:69:in 'block in tagged'\r\nactivesupport (5.1.4) lib/active_support/tagged_logging.rb:26:in 'tagged'\r\nactivesupport (5.1.4) lib/active_support/tagged_logging.rb:69:in 'tagged'\r\nrailties (5.1.4) lib/rails/rack/logger.rb:24:in 'call'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/remote_ip.rb:79:in 'call'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/request_id.rb:25:in 'call'\r\nrack (2.0.3) lib/rack/runtime.rb:22:in 'call'\r\nactivesupport (5.1.4) lib/active_support/cache/strategy/local_cache_middleware.rb:27:in 'call'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/executor.rb:12:in 'call'\r\nactionpack (5.1.4) lib/action_dispatch/middleware/static.rb:125:in 'call'\r\nrack (2.0.3) lib/rack/sendfile.rb:111:in 'call'\r\nrailties (5.1.4) lib/rails/engine.rb:522:in 'call'\r\npuma (3.11.0) lib/puma/configuration.rb:225:in 'call'\r\npuma (3.11.0) lib/puma/server.rb:624:in 'handle_request'\r\npuma (3.11.0) lib/puma/server.rb:438:in 'process_client'\r\npuma (3.11.0) lib/puma/server.rb:302:in 'block in run'\r\npuma (3.11.0) lib/puma/thread_pool.rb:120:in 'block in spawn_thread'\r\n```", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/MASTG-Hacking-Playground/issues/31", @@ -35525,10 +36758,11 @@ "pk": 1250, "fields": { "nest_created_at": "2024-09-11T21:29:45.729Z", - "nest_updated_at": "2024-09-11T21:29:45.729Z", + "nest_updated_at": "2024-09-13T04:23:58.320Z", "node_id": "I_kwDOA838Ks5SJhry", "title": "Port All Android Java App Use Cases to the Kotlin App", "body": "The MASTG Hacking playground apps differ in functionality. We'd like to keep working on the Kotlin app going forward and for that we need to port everything from the Java app and ensure it works in Kotlin on the latest Android version.\r\n\r\nPorting the UI elements should be evaluated but it's possibly not worth the effort. Maybe redoing them using the current Android UI recommendations is more adequate. In the end the screens were very simple.\r\n\r\nThe code on the Java app is very repetitive (as it is for the UI elements). Please consider optimizing it as much as possible to enable easier creation of new use cases in the future. 
If possible, create some templates.\r\n\r\nThe list of test cases is here: https://github.com/OWASP/MASTG-Hacking-Playground/wiki/Android-App\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/MASTG-Hacking-Playground/issues/39", @@ -35559,6 +36793,7 @@ "node_id": "MDU6SXNzdWU1NTEwMDMxNjY=", "title": "add a redirect ability", "body": "after scanning the qr code redirect to some link or whatsapp group", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/76", @@ -35589,6 +36824,7 @@ "node_id": "MDU6SXNzdWU3NDgxOTEyMTc=", "title": "QRLJacker Sessions captured automatically, none of them loads", "body": "QRLJacker sessions captured automatically even without scanning. None of them loads.\r\nonly 1 session captured from scanning but it does not load. When i enter the code \"sessions -i 4\" it goes to Whatsapp web page loads and remains in the Whatsapp QR code page.\r\n![Wa](https://user-images.githubusercontent.com/55798020/99898253-08ef9d80-2cc6-11eb-9795-1d90c8471f2b.png)\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/128", @@ -35619,6 +36855,7 @@ "node_id": "MDU6SXNzdWU5ODc0Mjc3NTc=", "title": "Update installation steps...", "body": "Plese, Update compatible installation steps for Parrotsec 4.11.2 and / or Kali 2021.2.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/158", @@ -35647,6 +36884,7 @@ "node_id": "I_kwDOA8L0X847bPTZ", "title": "nothing happened after scanning the code ", "body": "i use python 3.9 and all thing work with me but when i scan the code , whatsapp said code error ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/160", @@ -35675,6 +36913,7 @@ "node_id": "I_kwDOA8L0X84-s9nO", "title": "run is not recognized as an internal command ! - Error", "body": "```\r\nQrlJacker Module(grabber/whatsapp) > options\r\n\r\n Name Current value Required Description\r\n port 1337 Yes The local port to listen on.\r\n host 0.0.0.0 Yes The local host to listen on.\r\n useragent (default) Yes Make useragent is the (default) one, a (random) generated useragent or a specifed useragent\r\n```\r\n\r\nI did everything you said in the **Installing instructions**\r\nBut when I type `run` it is saying `[!] run is not recognized as an internal command !`\r\n\r\n```\r\nQrlJacker Module(grabber/whatsapp) > run\r\n[!] run is not recognized as an internal command !\r\n[+] Maybe you meant : , runreload, refresh, resource\r\n[+] Type help or ? to learn more..\r\n```\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/164", @@ -35701,6 +36940,7 @@ "node_id": "I_kwDOA8L0X85CZSMU", "title": "Selenium Driver error", "body": "Show browser error even after installing all the requirements and firefox, I think script need to be updated", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/166", @@ -35727,6 +36967,7 @@ "node_id": "I_kwDOA8L0X85C-gBa", "title": "Couldn't open Firefox! 
Check the installation instructions again!", "body": "Anyone else getting this issue?\r\n\r\nMy current Firefox version is 91.5.0esr should be up to date.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/168", @@ -35753,6 +36994,7 @@ "node_id": "I_kwDOA8L0X85EcMdK", "title": "EDIT: I made the template and that is on my repo || Please add the whatsapp web template pre installed", "body": "i just wanted a preinstalled WhatsApp web template \r\ncause the preinstalled template sucks!\r\nbecause everyone don't have much time nor much skills do design one \r\njust to save your time you can do the follow\r\n![image](https://user-images.githubusercontent.com/94917588/155350211-c7e0d3a1-589d-48f3-8bb1-96a3b0ac40ad.png)\r\n\r\nbut i am unable to figure out how to add the qr code to the downloaded html file \r\nif anyone knows how to add the qr code to the downloaded file please help me out or you can make a repository to provide \r\nthe base login page template for the qrl jacker\r\n\r\nany of your help is appreciated \r\nthank you\r\nand sorry for my bad english", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/169", @@ -35779,6 +37021,7 @@ "node_id": "I_kwDOA8L0X85EdDQT", "title": "Have some porblems with QRLJacking", "body": "Hello guys,can somebody help me to find solution of this problem?\r\nWhen i starting QRLJacking and write \"list\" and any other words have this error:\r\nQrlJacker > list\r\nfree(): invalid pointer\r\nzsh: IOT instruction python3 QrlJacker.py\r\nThanks for your answers)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/170", @@ -35805,6 +37048,7 @@ "node_id": "I_kwDOA8L0X85GdpPD", "title": "YouTube Demonstration Video Removed - Broken Link", "body": "The link to the video demonstration on YouTube has been removed, so the link is broken.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/171", @@ -35831,6 +37075,7 @@ "node_id": "I_kwDOA8L0X85GpTNa", "title": "QR", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/172", @@ -35857,6 +37102,7 @@ "node_id": "I_kwDOA8L0X85LSsOF", "title": "This tool doesn't work anymore!", "body": "It was working very well in v1 but in v2 its a total mess getting this error:\r\n\r\n`\r\nQrlJacker > use grabber/whatsapp\r\n\r\n< Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\nException in thread QR updater thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n[+] Waiting for sessions on whatsapp\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\nException in thread Idle detector thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n[+] Initializing webserver... 
[whatsapp]\r\nException in thread Webserver manager thread:\r\nTraceback (most recent call last): File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n\r\nQrlJacker Module(grabber/whatsapp) > self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/QRLJacking/QRLJacker/core/browser.py\", line 149, in check_img\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/QRLJacking/QRLJacker/core/browser.py\", line 133, in website_qr\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n self.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n`", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/174", @@ -35883,6 +37129,7 @@ "node_id": "I_kwDOA8L0X85LTxtF", "title": "Session saved successfully", "body": "if i run QRLJacking comes this:\r\n[+] Got session on whatsapp module\r\n[+] Session saved successfully\r\n\r\nbut i dont scan the code if i scan the code i only becom a txt on my phone \r\n\r\nif i start with debug mode:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n\r\n[+] Got session on whatsapp module\r\n[+] Profile location saved in profiles/Tue-Jun--7-11:19:47-2022.pf\r\n[+] Session saved successfully\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Desktop/QRLJacking/QRLJacker/core/browser.py\", line 170, in serve_module\r\n webserver.start_serving(host)\r\n File \"/home/kali/Desktop/QRLJacking/QRLJacker/core/module_utils.py\", line 32, in start_serving\r\n self.httpd = ReusableTCPServer( (host, self.port), MyHandler)\r\n File \"/usr/lib/python3.10/socketserver.py\", line 452, in __init__\r\n self.server_bind()\r\n File \"/usr/lib/python3.10/socketserver.py\", line 466, in server_bind\r\n self.socket.bind(self.server_address)\r\nOSError: [Errno 98] Address already in use\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/175", @@ -35909,6 +37156,7 @@ "node_id": "I_kwDOA8L0X85OZsxw", "title": "Browser not launch due to KeyError: 'whatsapp' error. Suggestion to just download the QR code to users home dir?", "body": "I get this error when I’m trying to launch the tool. Many are saying that the problem exists in geckodriver or Firefox that’s why it doesn’t show up. Is there any way the developer could make it so it doesn’t automatically have to launch Firefox and serve the QR code on a webserver? Instead, maybe make the tool create the malformed QR code and outputs it in the users directory. User then can manually create a phishing page or whatever they want during social engineering stage. It’ll be up to them. Such an awesome tool. 
Would be a shame if everybody forgets about this tool\r\n\r\nQrlJacker > use grabber/whatsapp\r\n\r\n< Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\nException in thread QR updater thread:\r\nTraceback (most recent call last):\r\nFile \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n[+] Waiting for sessions on whatsapp\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\nException in thread Idle detector thread:\r\nTraceback (most recent call last):\r\nFile \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n[+] Initializing webserver... [whatsapp]\r\nException in thread Webserver manager thread:\r\nTraceback (most recent call last): File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n\r\nQrlJacker Module(grabber/whatsapp) > self.run()\r\nFile \"/usr/lib/python3.10/threading.py\", line 946, in run\r\nself.run()\r\nFile \"/usr/lib/python3.10/threading.py\", line 946, in run\r\nself.run()\r\nFile \"/usr/lib/python3.10/threading.py\", line 946, in run\r\nself._target(*self._args, **self._kwargs)\r\nFile \"/home/kali/QRLJacking/QRLJacker/core/browser.py\", line 149, in check_img\r\nself._target(*self._args, **self._kwargs)\r\nFile \"/home/kali/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\nself._target(*self._args, **self._kwargs)\r\nFile \"/home/kali/QRLJacking/QRLJacker/core/browser.py\", line 133, in website_qr\r\ncontroller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\nself.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\ncontroller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/176", @@ -35935,6 +37183,7 @@ "node_id": "I_kwDOA8L0X85PULdM", "title": "Does not work on windows", "body": "Says readline is not found and if installed it says readline is not for windows", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/177", @@ -35961,6 +37210,7 @@ "node_id": "I_kwDOA8L0X85PpaTP", "title": "QrlJacker Module(grabber/whatsapp) > run is failed ", "body": "After entering run command failed to open \r\n\r\n[+] Using the default useragent\r\n\r\n[+] Running a thread to keep the QR image [whatsapp]\r\nException in thread QR updater thread:\r\n[+] Waiting for sessions on whatsapp\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\nException in thread Idle detector thread:\r\n[+] Initializing webserver... 
[whatsapp]\r\nTraceback (most recent call last):\r\n\r\nQrlJacker Module(grabber/whatsapp) > self.run()\r\n\r\nException in thread Webserver manager thread:\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n\r\nQrlJacker Module(grabber/whatsapp) > Traceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/immortal/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 133, in website_qr\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/immortal/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 149, in check_img\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/immortal/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n self.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/178", @@ -35987,6 +37237,7 @@ "node_id": "I_kwDOA8L0X85Pvf-B", "title": "QRLJacking QR code is to visible in firefox", "body": "I was to trying to use QRLJacking recently. But whenever I run it the QR code is not visible in Firefox and after a while it shows error like this in the terminal. \r\n![Screenshot from 2022-08-13 10-27-56](https://user-images.githubusercontent.com/77933538/184468485-dfaa4458-67c8-4d49-b295-528a2669e958.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/179", @@ -36013,6 +37264,7 @@ "node_id": "I_kwDOA8L0X85QEr0i", "title": "im getting this error", "body": "QrlJacker Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\nException in thread QR updater thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/mrawm/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 133, in website_qr\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n[+] Waiting for sessions on whatsapp\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\nException in thread Idle detector thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/mrawm/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 149, in check_img\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n[+] Initializing webserver... 
[whatsapp]\r\n\r\nQrlJacker Module(grabber/whatsapp) > Exception in thread Webserver manager thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1009, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 946, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/mrawm/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\n self.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/180", @@ -36039,6 +37291,7 @@ "node_id": "I_kwDOA8L0X85QWKin", "title": "Not getting QRCODE", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/181", @@ -36065,6 +37318,7 @@ "node_id": "I_kwDOA8L0X85SFS5s", "title": "stuck on the whatsapp web page intro", "body": "I done the acquired session, but when i interact my session ID its only open the whatsapp web page. not with that chatbox. is there any problem here that is similar with me?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/182", @@ -36091,6 +37345,7 @@ "node_id": "I_kwDOA8L0X85Si-aC", "title": "Project Expired", "body": "Hi my friend, nice to meet you. You Script, for expired. Is coming. i can logged in. Using code qr, For unistalling. I unresult problems, for your script. \r\n\r\n\r\nLine is 39...... Is bad. for way ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/183", @@ -36117,6 +37372,7 @@ "node_id": "I_kwDOA8L0X85S3gip", "title": "QR code is not generating", "body": "QR code is not generating after the page launch", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/184", @@ -36143,6 +37399,7 @@ "node_id": "I_kwDOA8L0X85S6Cfc", "title": "Qrl code not generating and old file of qrljacker the page is not refreshing", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/185", @@ -36169,6 +37426,7 @@ "node_id": "I_kwDOA8L0X85TQ_Ze", "title": "repository not found ", "body": "remote: Repository not found.\r\nfatal: repository 'https://github.com/OWASP/QRJacking/' not found\r\n\r\nwhat can I do for this problem..!", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/186", @@ -36195,6 +37453,7 @@ "node_id": "I_kwDOA8L0X85ThKFP", "title": "I think project developer is busy 🤔 ", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/187", @@ -36221,6 +37480,7 @@ "node_id": "I_kwDOA8L0X85TxM8r", "title": "Error and not generating qr ", "body": "Exception in thread Webserver manager thread: \r\nTraceback (most recent call last): \r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner \r\n self.run() \r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run \r\n self._target(*self._args, **self._kwargs) \r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 170, in serve_module \r\n webserver.start_serving(host) \r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/module_utils.py\", line 32, in start_serving \r\n self.httpd = ReusableTCPServer( (host, self.port), MyHandler) \r\n File \"/usr/lib/python3.10/socketserver.py\", line 452, in 
__init__ \r\n self.server_bind() \r\n File \"/usr/lib/python3.10/socketserver.py\", line 466, in server_bind \r\n self.socket.bind(self.server_address) \r\nOSError: [Errno 98] Address already in use \r\n \r\n ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/188", @@ -36247,6 +37507,7 @@ "node_id": "I_kwDOA8L0X85UHtYa", "title": "Got stuck in this, unable to move further. Help me out.", "body": "![20221017_150521](https://user-images.githubusercontent.com/115987658/196152427-170324ad-8ee4-4eab-b5ab-61075a0c2723.jpg)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/189", @@ -36273,6 +37534,7 @@ "node_id": "I_kwDOA8L0X85Wf3hM", "title": "QR code is not showing ", "body": "I did everything but when I enter my local host in my Firefox browser it won’t show the QR code please can you help me out ?\r\n ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/191", @@ -36299,6 +37561,7 @@ "node_id": "I_kwDOA8L0X85X23g4", "title": "Any one Have solve this problem? ", "body": "QrlJacker Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\nException in thread QR updater thread:\r\nTraceback (most recent call last):\r\n[+] Waiting for sessions on whatsapp\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 133, in website_qr\r\n[+] Initializing webserver... 
[whatsapp]\r\nException in thread Idle detector thread:\r\n\r\nQrlJacker Module(grabber/whatsapp) > Traceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\nException in thread Webserver manager thread:\r\nTraceback (most recent call last):\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 149, in check_img\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\n self.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/192", @@ -36325,6 +37588,7 @@ "node_id": "I_kwDOA8L0X85X23kE", "title": "QR whats app", "body": "QrlJacker Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\nException in thread QR updater thread:\r\nTraceback (most recent call last):\r\n[+] Waiting for sessions on whatsapp\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 133, in website_qr\r\n[+] Initializing webserver... 
[whatsapp]\r\nException in thread Idle detector thread:\r\n\r\nQrlJacker Module(grabber/whatsapp) > Traceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\nException in thread Webserver manager thread:\r\nTraceback (most recent call last):\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 149, in check_img\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\n self.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/193", @@ -36351,6 +37615,7 @@ "node_id": "I_kwDOA8L0X85X23oJ", "title": " QRLJacking", "body": "QrlJacker Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\nException in thread QR updater thread:\r\nTraceback (most recent call last):\r\n[+] Waiting for sessions on whatsapp\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 133, in website_qr\r\n[+] Initializing webserver... 
[whatsapp]\r\nException in thread Idle detector thread:\r\n\r\nQrlJacker Module(grabber/whatsapp) > Traceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\nException in thread Webserver manager thread:\r\nTraceback (most recent call last):\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 149, in check_img\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\n self.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/194", @@ -36377,6 +37642,7 @@ "node_id": "I_kwDOA8L0X85Y6waT", "title": "Qrljacking ", "body": "https://github.com/OWASP/QRLJacking/tree/master", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/195", @@ -36403,6 +37669,7 @@ "node_id": "I_kwDOA8L0X85ZRBbN", "title": "run is not recognized as an internal command !", "body": "I receive this error when I try to run the \"run\" command on macos, something to do?\r\n\"Screenshot\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/196", @@ -36429,6 +37696,7 @@ "node_id": "I_kwDOA8L0X85bRURA", "title": "QRLJacking error while running the framework", "body": "Hi All,\r\nThis is Sameer, a beginner in cyber security, please someone help me in the below issue.\r\nQRLJacking framework is not running, gives an error that \"Couldn't open Firefox! 
Check the installation instructions again!\"\r\nError snapshot is attached below.\r\n![error QRLJacking](https://user-images.githubusercontent.com/122555364/212166254-33c4870d-2d00-4ba3-8c5a-bc0c4b1835c5.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/197", @@ -36455,6 +37723,7 @@ "node_id": "I_kwDOA8L0X85c9ezv", "title": "keyerror whatsapp qrljacker", "body": "\r\nQrlJacker Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\nException in thread QR updater thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n[+] Waiting for sessions on whatsapp\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 133, in website_qr\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\nException in thread Idle detector thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 149, in check_img\r\n controller = self.browsers[module_name][\"Controller\"]\r\nKeyError: 'whatsapp'\r\n[+] Initializing webserver... [whatsapp]\r\n\r\nQrlJacker Module(grabber/whatsapp) > Exception in thread Webserver manager thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/kali/Downloads/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\n self.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/198", @@ -36481,6 +37750,7 @@ "node_id": "I_kwDOA8L0X85dNweA", "title": "Run command error", "body": "Exception in thread Webserver manager thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/root/QRLJacking/QRLJacker/core/browser.py\", line 167, in serve_module\r\n self.browsers[module_name][\"host\"] = \"http://\"+host\r\nKeyError: 'whatsapp'\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/199", @@ -36503,10 +37773,11 @@ "pk": 1287, "fields": { "nest_created_at": "2024-09-11T21:30:26.600Z", - "nest_updated_at": "2024-09-11T21:30:26.600Z", + "nest_updated_at": "2024-09-13T16:15:12.422Z", "node_id": "I_kwDOA8L0X85dgabh", "title": "QR code not loading", "body": "Hi I am getting below logs and the QR code is not loading. 
Could anyone please help me.\r\n\r\n\r\n\r\nQrlJacker Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\n[+] Waiting for sessions on whatsapp\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button\r\n[whatsapp]\r\n[+] Initializing webserver... [whatsapp]\r\n
QrlJacker Module(grabber/whatsapp) > ----------------------------------------\r\nException occurred during processing of request from ('127.0.0.1', 43532)\r\nTraceback (most recent call last):\r\nFile \"/usr/lib/python3.10/http/server.py\", line 727, in send_head\r\nf = open(path, 'rb')\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/rockstar005\r\nDownloads/QRLJacking/QRLJacker/core/www/whatsapp/tmp.png'\r\n
During handling of the above exception, another exception occurred:\r\n
Traceback (most recent call last):\r\nFile \"/usr/lib/python3.10/socketserver.py\", line 316, in _handle_request_noblock\r\nself.process_request(request, client_address)\r\nFile \"/usr/lib/python3.10/socketserver.py\", line 347, in process_request\r\nself.finish_request(request, client_address)\r\nFile \"/usr/lib/python3.10/socketserver.py\", line 360, in finish_request\r\nself.RequestHandlerClass(request, client_address, self)\r\nFile \"/home/rockstar005/Downloads/QRLJacking/QRLJacker/core\r\nmodule_utils.py\", line 27, in __init__\r\nsuper().__init__(*args, directory=serve_dir, **kwargs)\r\nFile \"/usr/lib/python3.10/http/server.py\", line 668, in __init__\r\nsuper().__init__(*args, **kwargs)\r\nFile \"/usr/lib/python3.10/socketserver.py\", line 747, in __init__\r\nself.handle()\r\nFile \"/usr/lib/python3.10/http/server.py\", line 433, in handle\r\nself.handle_one_request()\r\nFile \"/usr/lib/python3.10/http/server.py\", line 421, in handle_one_request\r\nmethod()\r\nFile \"/usr/lib/python3.10/http/server.py\", line 672, in do_GET\r\nf = self.send_head()\r\nFile \"/usr/lib/python3.10/http/server.py\", line 729, in send_head\r\nself.send_error(HTTPStatus.NOT_FOUND, \"File not found\")File \"/usr/lib/python3.10/http/server.py\", line 488, in send_error\r\nself.wfile.write(body)\r\nFile \"/usr/lib/python3.10/socketserver.py\", line 826, in write\r\nself._sock.sendall(b)\r\nBrokenPipeError: [Errno 32] Broken pipe\r\n----------------------------------------\r\n

QrlJacker Module(grabber/whatsapp) > ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/200", @@ -36533,6 +37804,7 @@ "node_id": "I_kwDOA8L0X85gVnxo", "title": "QR code not loading", "body": "QR Code not loading in firefox or another browser. When enabling debug and running the code whatsapp \"add device\" code pops up but it is not the attack it is directly from whats app. \r\n debug\r\n[+] Debug mode enabled!\r\n\r\n run\r\n[+] Using the default useragent\r\n[+] Running a thread to keep the QR image [whatsapp]\r\n[+] Waiting for sessions on whatsapp\r\n[+] Running a thread to detect Idle once it happens then click the QR reload button [whatsapp]\r\n[+] Initializing webserver... [whatsapp]\r\n\r\n ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/202", @@ -36559,6 +37831,7 @@ "node_id": "I_kwDOA8L0X85jUS18", "title": "Lebaran", "body": " \n \n \n     Document \n      \n      \n \n \n \n      \n         
 
.: Selamat Hari Raya Idul Fitri :.
\n         Mohon maaf lahir dan batin \n      \n \n ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/207", @@ -36585,6 +37858,7 @@ "node_id": "I_kwDOA8L0X85l6tW9", "title": "Firefox", "body": "\r\nwhen i run QRLJacker and get to the loading screen with the default settings,if i try a test and go to run,firefox opens but only in my task bar,it will open as the the icon and i cannot get it to launch into the browser.\r\nany suggestions as i feel i'm now pretty close to getting this working.\r\nmany thanks.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/214", @@ -36611,6 +37885,7 @@ "node_id": "I_kwDOA8L0X85oNUsS", "title": "I want to know how to hack a phone", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/216", @@ -36637,6 +37912,7 @@ "node_id": "I_kwDOA8L0X85rZzxm", "title": "Resolvendo o Problema de navegador. E outros problemas do programa QRJacking", "body": "Estou usando a versão do Kali Linux 2022.4\r\nTive muitos problemas para poder rodar este aplicativo tive que procurar soluções em diversos locais na web. Até que finalmente consegui encontrar a solução para o bom funcionamento e instalação do aplicativo. \r\nCá vai o guia..\r\n\r\nAtualize o navegador Firefox para a versão mais recente\r\nInstale o geckodriver mais recente (é obrigatório usar a versão mais recente do geckodriver para que funcione neste exato momento usei a versão 33 ) em https://github.com/mozilla/geckodriver/releases e extraia o arquivo e faça:\r\nchmod +x geckodriver\r\nsudo mv -f geckodriver /usr/local/share/geckodriver\r\nsudo ln -s /usr/local/share/geckodriver /usr/local/bin/geckodriver\r\nsudo ln -s /usr/local/share/geckodriver /usr/bin/geckodriver\r\nClone o repo com git clone https://github.com/OWASP/QRLJackingentão faça\r\ncd QRLJacking/QRLJacker\r\nInstale todos os requisitos compip install -r requirements.txt\r\n Instale a versão mais recente do Microsoft firefox neste momento estou usando a versão 115.0.2.\r\nOBS: Para que está tendo erro executando o comando run \"run command not found\" ou \"firefox not found\" pode ser por estar a usar a versão antiga do navegador firefox.\r\nOu deve constar se dentro desta pasta /usr/lib/pyhton3/dist-packages/selenium/webdriver/firefox/ tem o arquivo \"webdriver_prefs.json\r\nCaso não tenha este arquivo baixe neste link https://github.com/Cyberw1ng/Introduction/blob/main/webdriver_prefs.json \r\nEm seguida copie o arquivo webdriver_prefs.json para a pasta /usr/lib/pyhton3/dist-packages/selenium/webdriver/firefox/.\r\n\r\nEm seguida pode executar o programa.\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/217", @@ -36663,6 +37939,7 @@ "node_id": "I_kwDOA8L0X85r8ll0", "title": "bug in open firefox", "body": "![Captura de pantalla (447)](https://github.com/OWASP/QRLJacking/assets/117747051/543d4bb6-3134-4d12-8c23-0bee9362dde7)\r\nhello, someone was able to solve the problem with the geckodriver in firefox since I did it correctly in kali purple and it does not allow me to execute it, I don't know if it can be seen in the image but it tells me\r\n1. \"using the default user agent\"\r\n2. \"couldn't open firefox! 
check the installation instructions again!\"\r\nand then I followed the steps as I repeat and the same thing appears to me, if someone could help me please or if the creator could it would be great :) thanks", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/218", @@ -36689,6 +37966,7 @@ "node_id": "I_kwDOA8L0X85vPD8V", "title": "Whatsapp show Browser active", "body": "Whatsapp show the browser used in the linked devices.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/219", @@ -36715,6 +37993,7 @@ "node_id": "I_kwDOA8L0X85w0EMH", "title": "5340106557", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/220", @@ -36741,6 +38020,7 @@ "node_id": "I_kwDOA8L0X85xi9lr", "title": "SOLUTION NEEDED CAN ANY ON HELP?", "body": "rlJacker Module(grabber/whatsapp) > run\r\n[+] Using the default useragent\r\n[!] Couldn't open Firefox! Check the installation instructions again!", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/222", @@ -36767,6 +38047,7 @@ "node_id": "I_kwDOA8L0X85yenld", "title": "I cant run qrl in wsl1 and My laptop doest support wsl2 and snap is not working.", "body": "I try to run QRLjacker in wsl1 but when I run it says cant open Firefox I try to set localhost and access the page through local IP address .Could you please help me how to do it in wsl1?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/223", @@ -36793,6 +38074,7 @@ "node_id": "I_kwDOA8L0X85zyFbK", "title": "Unable to open Sessions ", "body": "Whenever I entered the command sessions -i 2 or 1 or 0 after getting successfully the sessions\r\nIt shows that:-\r\n[+]Starting interaction with (2)\r\n[!] Couldn't set write permission back for previous profile path \r\n[W] Invalid permissions can produce unexpected errors\r\n[!] 
Couldn't open browser to view session!\r\n![Screenshot at 2023-10-14 00-54-31](https://github.com/OWASP/QRLJacking/assets/93839106/98c73640-ba65-4403-a731-87b370bedcef)\r\n\r\n\r\nHow to solve this issue ?\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/224", @@ -36819,6 +38101,7 @@ "node_id": "I_kwDOA8L0X85z_3EU", "title": "Current code is no longer compatible with new Selenium releases ", "body": "Seems like Selenium 4.10 changed a lot of code from previous versions.\r\nTake a look at: https://github.com/SeleniumHQ/selenium/commit/9f5801c82fb3be3d5850707c46c3f8176e3ccd8e\r\n\r\nThe result is that QRLJaker is no longer working (again).\r\nThe only think i can suggest at the moment is:\r\n\r\n* downgrade your Selenium version `pip install \"selenium<=4.9.0\"`\r\n* please look careful at the install instructions: https://github.com/OWASP/QRLJacking/blob/master/QRLJacker/README.md\r\n* be sure to download the right version of Geckodriver for your architecture (run `uname -a` command): https://github.com/mozilla/geckodriver/releases\r\n\r\nFor now, following the previous instruction, QRLJacker should work fine (i just tested it on various machines).\r\n\r\nI will try my best to update QRLJaker again and make it compatible with newer versions of Selenium.\r\nI think it will take some time to address the problem because is not easy and I'm quite busy at the moment.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/225", @@ -36847,6 +38130,7 @@ "node_id": "I_kwDOA8L0X850wQwQ", "title": "Unable to open sessions", "body": "![Screenshot_2023-10-24_01-49-01](https://github.com/OWASP/QRLJacking/assets/81898157/6bf20a93-f66b-4e85-91f8-a81a9669c9e5)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/227", @@ -36873,6 +38157,7 @@ "node_id": "I_kwDOA8L0X852CCNP", "title": "Couldn't open Firefox! Check the installation instructions again!", "body": "I am getting this error when I try to \"Run\" , and I have tried everything possible but still doesn't work for some reason! please help!\r\n\r\n![Uploading Couldn't open Firefox! Check the installation instructions again!.PNG…]()\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/229", @@ -36899,6 +38184,7 @@ "node_id": "I_kwDOA8L0X853YCAD", "title": "COULDN'T OPEN THE FIREFOX CHECK THE INSTALLATION INSTRUCTIONS AGAIN!", "body": "SIR, I TRIED EVERYTHING WHATEVER I CAN DO BUT IT IS STILL NOT WORKING AND THERE ARE MANY PEOPLE WHO IS SUFFERING FROM THIS SAME ISSUE JUST FIGURE THIS OUT AS SOON AS POSSIBLE AND THE ISSUE IS VERY CLEAR .....COULDN'T OPEN THE FIREFOX ! CHECK THE INSTALLATION INSTRUCTIONS AGAIN!", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/230", @@ -36925,6 +38211,7 @@ "node_id": "I_kwDOA8L0X854kDL2", "title": "No starting session after log out", "body": "After log out, when I start the same session by ID I have only a Firefox window with WhatsApp web and qr code, if I don't log out when I start the session open all chats. Now, I would know if is it a simple rule or I have some trouble with grabber. 
So someone can answer to my question?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/231", @@ -36951,6 +38238,7 @@ "node_id": "I_kwDOA8L0X854ns9H", "title": "session not captured issue", "body": "All the things works fine for me when i scan the qrl code then the session is not captured why ??", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/232", @@ -36977,6 +38265,7 @@ "node_id": "I_kwDOA8L0X8547fHH", "title": "For discord?", "body": "Can i use this to discord account Hacking if i can then how?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/233", @@ -37003,6 +38292,7 @@ "node_id": "I_kwDOA8L0X855OE2a", "title": "I solved this problem....Soon i will share you the solution....", "body": " I solved this problem....Soon i will share you the solution....\r\n\r\n\r\nOn Sat, Feb 4, 2023 at 7:22 PM owais1900 ***@***.***> wrote:\r\n\r\n> [image: IMG_20230204_192122]\r\n> \r\n> Helo couldn't open firefox\r\n>\r\n> —\r\n> Reply to this email directly, view it on GitHub\r\n> ,\r\n> or unsubscribe\r\n> \r\n> .\r\n> You are receiving this because you commented.Message ID:\r\n> ***@***.***>\r\n>\r\n\r\n_Originally posted by @ayushis2ayush in https://github.com/OWASP/QRLJacking/issues/168#issuecomment-1416794892_\r\n ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/234", @@ -37029,6 +38319,7 @@ "node_id": "I_kwDOA8L0X8554aOP", "title": "Sessions not being captured in QRLJacking", "body": "so i have followed all the instructions regarding the steps for QRL jacking, i made it into the qr code but when i scan it. my terminal does not show the sessions. how do i fix that", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/235", @@ -37055,6 +38346,7 @@ "node_id": "I_kwDOA8L0X858hCdz", "title": "Is this project dead ?", "body": "Im using arm7 phone and made it work with unrooted termux and xfce desktop to export display to vnc i got the sessions captured but when i eter any conversation on firefox in the vnc firefox crashes immediately \r\n\r\nthis is the geckodriver traceback\r\n\r\nCrash Annotation GraphicsCriticalError: |[C0][GFX1-]: CompositorBridgeChild receives IPC close with reason=AbnormalShutdown (t=80.6358) Exiting due to channel error.\r\nExiting due to channel error.\r\n[1705551185835](tel:1705551185835) geckodriver INFO Listening on 127.0.0.1:39869\r\n[1705551195254](tel:1705551195254) mozrunner::runner INFO Running command: MOZ_CRASHREPORTER=\"1\" MOZ_CRASHREPORTER_NO_REPORT=\"1\" MOZ_CRASHREPORTER_SHUTDOWN=\"1\" MOZ_NO_REMOTE=\"1\" \"/dat ... 
46607\" \"--remote-allow-hosts\" \"localhost\" \"-no-remote\" \"-profile\" \"/data/data/com.termux/files/>\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nconsole.warn: services.settings: Ignoring preference override of remote settings server\r\nconsole.warn: services.settings: Allow by setting MOZ_REMOTE_SETTINGS_DEVTOOLS=1 in the environment\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\nRead port: 39865\r\n[1705551197478](tel:1705551197478) Marionette INFO Marionette enabled\r\nRead port: 39865\r\nRead port: 39865\r\n[1705551197732](tel:1705551197732) Marionette INFO Listening on port 36577\r\nRead port: 36577\r\nWebDriver BiDi listening on ws://127.0.0.1:46607\r\n[1705551198233](tel:1705551198233) RemoteAgent WARN TLS certificate errors will be ignored for this session\r\nconsole.error: ({})\r\nconsole.error: \"Translations: SIMD not supported\" (new CompileError(\"at offset 15: bad type\", \"chrome://global/content/translations/simd-detect-worker.js\", 14))\r\nDevTools listening on ws://127.0.0.1:46607/devtools/browser/a45657dd-1c54-44ff-ba84-056b9b8dd960\r\nJavaScript warning: https://web.whatsapp.com/main.0cf917fc9b3d8f353e54.js, line 5: unreachable code after return statement\r\nJavaScript warning: https://web.whatsapp.com/main.0cf917fc9b3d8f353e54.js, line 5: unreachable code after return statement\r\nJavaScript error: resource://gre/modules/PromiseWorker.sys.mjs, line 102: Error: Could not get children of file(/data/data/com.termux/files/usr/tmp/rust_mozprofileSvOYg0/thumbnails) because it does not exist\r\nExiting due to channel error.\r\nExiting due to channel error.\r\n\r\n\r\nHow can i fix it or any work out to use this tool on the phone browser not the vncviewer ?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/240", @@ -37081,6 +38373,7 @@ "node_id": "I_kwDOA8L0X858uDlG", "title": "Please any developer can help me no one is answering?", "body": "Can i make this tool to work on android phone using android sdk and my firefox app ? But i dint know which codes to edit .. i tied it on vnc and it crash whenever i enter any conversation #240 ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/241", @@ -37107,6 +38400,7 @@ "node_id": "I_kwDOA8L0X86Ag83Y", "title": "Showing errror when run is entered", "body": "[+] Using useragent chrome\r\n[!] Couldn't open Firefox! Check the installation instructions again!\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/242", @@ -37133,6 +38427,7 @@ "node_id": "I_kwDOA8L0X86MZKl-", "title": "error encounter during execution of QrlJacker.py", "body": "can anyone solve this problem\r\n\r\nQrlJacker > use grabber/whatsapp\r\n\r\nQrlJacker Module(grabber/whatsapp) > set port 1337\r\n[+] port => 1337\r\n\r\nQrlJacker Module(grabber/whatsapp) > run\r\n[!] 
Couldn't find Firefox file path!\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/243", @@ -37159,6 +38454,7 @@ "node_id": "I_kwDOA8L0X86SKotS", "title": "QR CODE SCANNING FAILURE", "body": "Everything seems to work fine but if i scan the qrl, i will recieve the following error \"can't link new devices at this time. try again later. \r\nCan somebody help me out with this, but if i scan whatsapp web url that doesn't originates from QRLJacker is works.\r\n![IMG_3216](https://github.com/user-attachments/assets/a57173d7-5b54-4326-94dd-c1daeefafda4)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/244", @@ -37185,6 +38481,7 @@ "node_id": "I_kwDOA8L0X86T__Zx", "title": "Not session after scan QR ", "body": "Hi, all! May be you help me, pls. \r\n\r\nI install step by step this framework. So ok, but only i scan qr on page QRLJacking, i see session in my WhatsApp, but in QRLJacking in terminal write: \"No captured sessions\" (if i write comand \"sessions\"). But in WhatsApp session i see that its active and last time it.\r\n\r\nWhats problem?\r\n\r\nBig thx!", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/245", @@ -37211,6 +38508,7 @@ "node_id": "I_kwDOA8L0X86V1NSf", "title": "Good day people", "body": "I don't know if this tool still works, the QR code is not coming up on the whatsapp web server. Please do anyone has a way out?\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/QRLJacking/issues/246", @@ -37233,10 +38531,11 @@ "pk": 1315, "fields": { "nest_created_at": "2024-09-11T21:31:04.677Z", - "nest_updated_at": "2024-09-11T21:31:04.677Z", + "nest_updated_at": "2024-09-13T16:15:24.937Z", "node_id": "MDU6SXNzdWU0NjU3MjE2MDY=", "title": "in apache2 js file,css file not loading shoing only html content", "body": "![Screenshot from 2019-07-09 16-44-47](https://user-images.githubusercontent.com/12762732/60883865-ff000280-a268-11e9-8597-aadf9d27fb6f.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/OWASPWebGoatPHP/issues/6", @@ -37263,6 +38562,7 @@ "node_id": "MDU6SXNzdWUzMzA2NTIzNzQ=", "title": "Create & Publish \"Request for Threat Models\"", "body": "Write post to community asking for TM (anonymised or samples)\r\n\r\nto go on\r\n - twitter \r\n - linked in\r\n - email\r\n - blog post", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-model-cookbook/issues/2", @@ -37291,6 +38591,7 @@ "node_id": "MDU6SXNzdWU0NzU1MjM2MjE=", "title": "Contact previous contributors of this repo", "body": "Contact people who previously contributed/interacted with this repo to inform them that an official project is being started.\r\n\r\nThey might just see this so https://www.owasp.org/index.php/OWASP_Threat_Model_Cookbook !\r\n\r\nBut the plan was to have Tash contact them with a more personalized touch 😄.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-model-cookbook/issues/18", @@ -37321,6 +38622,7 @@ "node_id": "MDU6SXNzdWU1NjAwMjY0MjM=", "title": "Convert jetscout diagrams from analoge diagram to digital", "body": 
"![image](https://user-images.githubusercontent.com/656739/73794597-b7127100-47a0-11ea-95b0-94e19b3b9b51.png)\r\n\r\nhttps://github.com/OWASP/threat-model-cookbook/tree/master/Flow%20Diagram/jetscout\r\n\r\n![image](https://user-images.githubusercontent.com/656739/73794550-9813df00-47a0-11ea-9fb0-c3b0d1a666c7.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-model-cookbook/issues/23", @@ -37336,8 +38638,8 @@ "repository": 1142, "assignees": [], "labels": [ - 247, - 248 + 248, + 247 ] } }, @@ -37350,6 +38652,7 @@ "node_id": "MDU6SXNzdWU1NjAwMzMyMTI=", "title": "convert sokify from analogue to digital", "body": "![image](https://user-images.githubusercontent.com/656739/73795677-1e312500-47a3-11ea-9380-9de87cecf2de.png)\r\n\r\n\r\nSee files at https://github.com/filetrust/threat-model-cookbook/tree/master/Flow%20Diagram/sokify", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-model-cookbook/issues/24", @@ -37365,8 +38668,8 @@ "repository": 1142, "assignees": [], "labels": [ - 247, - 248 + 248, + 247 ] } }, @@ -37379,6 +38682,7 @@ "node_id": "I_kwDOA5eWBc49Ix82", "title": "Examples of Identified Threats, Risk Ratings, and Mitigations?", "body": "These are really useful assets for demonstrating how to diagraming the system, \"What are we working on\". \r\n\r\nI don't see any artifacts that capture the threats that have been identified by these models (steps 2 and 3 of a a threat model). i.e., What can go wrong? What can we do about it? \r\n\r\nHave I overlooked something, or is there a plan to add examples of a completed threat model exercise? \r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-model-cookbook/issues/36", @@ -37401,10 +38705,11 @@ "pk": 1321, "fields": { "nest_created_at": "2024-09-11T21:31:24.992Z", - "nest_updated_at": "2024-09-11T21:31:24.992Z", + "nest_updated_at": "2024-09-13T16:15:34.833Z", "node_id": "I_kwDOA5eWBc5lpyJS", "title": "Archive this project or warn contributors", "body": "The Threat Model Cookbook project has been dormant for a long time, and the [www pages](https://owasp.org/www-project-threat-model-cookbook/) have been archived\r\n\r\n@alyssa-hardin should this repo should also be archived read-only or instead the contributors warned that it is a dormant project?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/threat-model-cookbook/issues/37", @@ -37431,6 +38736,7 @@ "node_id": "MDU6SXNzdWUxNTkxMzczODI=", "title": "Add page 'saved' status UI feedback", "body": "The idea is to give the user a sense if there is unsaved data, and to provide feedback when it is save\n\nrelated to #41\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/44", @@ -37460,6 +38766,7 @@ "node_id": "MDU6SXNzdWUxNTkxNDkxMTA=", "title": "add contact page", "body": "with link to this repo Readme and issues pages (to open issues)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/51", @@ -37489,6 +38796,7 @@ "node_id": "MDU6SXNzdWUxNTkxNTQ0MzI=", "title": "Only use d3 script includes on pages that need it", "body": "At the moment (due to a bug) the js and css files where added to index \n\n![image](https://cloud.githubusercontent.com/assets/656739/15894684/239e9744-2d7f-11e6-91fd-c299941607ca.png)\n\nWhich is not the best place to put these files\n", + "summary": "", "state": 
"open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/53", @@ -37518,6 +38826,7 @@ "node_id": "MDU6SXNzdWUxNTk1MDg5MDM=", "title": "Add log stats and visualisation page", "body": "This should provide info on:\n- latest deploy stats (i.e. server boot)\n- who is currently viewing the site\n- who has visited the site\n- changes made to existing teams\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/57", @@ -37547,6 +38856,7 @@ "node_id": "MDU6SXNzdWUxNTk3NzY1NzI=", "title": "Add version (or hash) to welcome page", "body": "this would be an easy way to know what version/commit we are currently in\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/58", @@ -37576,6 +38886,7 @@ "node_id": "MDU6SXNzdWUxNjI2MjE3OTY=", "title": "Add 'loading' status or icon to UI, and timeout detection", "body": "Important for AJAX calls which can look like they are not working (on slow connections)\n\n![image](https://cloud.githubusercontent.com/assets/656739/16407443/9e39a6c4-3d0b-11e6-9cf2-b28c4ee40727.png)\n\nAlso add a timeout to those calls so that an user message can be shown (saying something like: \"Could not complete the requests in x secs\")\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/83", @@ -37605,6 +38916,7 @@ "node_id": "MDU6SXNzdWUxNjM1MTcyNzg=", "title": "Refactor Render_View ctor", "body": "![image](https://cloud.githubusercontent.com/assets/656739/16540419/0e258f36-405c-11e6-9829-2e7e835f52e4.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/85", @@ -37635,6 +38947,7 @@ "node_id": "MDU6SXNzdWUxNjM1MjMxODU=", "title": "Write test that checks travis logs for leak of GitHub tokens", "body": "as seen at \n\n![image](https://cloud.githubusercontent.com/assets/656739/16541115/97536e66-4072-11e6-9c01-d6fb8caed273.png)\n\nand \n\nhttps://api.travis-ci.org/jobs/141860636/log.txt?deansi=true\n\nInteresting question is where and when to run this\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/86", @@ -37650,8 +38963,8 @@ "repository": 1145, "assignees": [], "labels": [ - 252, - 256 + 256, + 252 ] } }, @@ -37664,6 +38977,7 @@ "node_id": "MDU6SXNzdWUxNjQwNTQ0NTg=", "title": "Add git commit when content changes are made", "body": "This will allow the use of Git to track changes made to files\n\nThe idea is that everytime the save button is used, a git commit with those changes is made (after that we need to figure out a way to push those changes to the git server, so that there is an auto-backup of the data saved)\n\nUntil there are logged in users, the changes will be made using a generic bsimm-bot account\n\nWill be interesting to see if there can be race-conditions on multiple users triggering an save at the same time\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/93", @@ -37693,6 +39007,7 @@ "node_id": "MDU6SXNzdWUxNjQwNTgyMDg=", "title": "Refactor how controllers are loaded", "body": "Improve this so that all classes inside the controller folder are automatically added\n\n![image](https://cloud.githubusercontent.com/assets/656739/16616887/27010c0a-4378-11e6-9d0a-a20cc5378c66.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": 
"https://github.com/OWASP/Maturity-Models/issues/96", @@ -37722,6 +39037,7 @@ "node_id": "MDU6SXNzdWUxNjQxMDAwNzI=", "title": "Add support for using the keyboard when filling in the data", "body": "for example It would make it faster to use the up-down arrows to move between activities and using other keys (or space or enter) to provide the values of 'Yes, No, NA and Maybe'\n\n![image](https://cloud.githubusercontent.com/assets/656739/16623065/593122f2-4394-11e6-82ac-84bfb18b8a7b.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/101", @@ -37751,6 +39067,7 @@ "node_id": "MDU6SXNzdWUxNjQxMDMyMjM=", "title": "It should be possible to take an existing scheme and compare it to another", "body": "As an organisation, I would be interested to see how my BSIMM score compares to OWASP SAMM or other models so that I understand my estimated maturity level for different maturity models. This could be achieved by translating the mappings between the two models as listed in https://github.com/OWASP/opensamm/blob/master/v1.1/mapping/SAMM1.1_BSIMM6_Mapping.xlsx.\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/102", @@ -37780,6 +39097,7 @@ "node_id": "MDU6SXNzdWUxNjQyODM4NTU=", "title": "Add support for partial YES values", "body": "for example data can be stored as a percentage on the YES value\n\n``` coffee\n{\n \"metadata\": {\n \"team\": \"Team A\"\n },\n \"activities\": {\n \"Governance\": {\n \"SM.1.1\": \"Yes:100\",\n \"SM.1.4\": \"Yes:30\",\n \"SM.2.2\": \"No\",\n \"SM.2.3\": \"Yes:60\",\n \"CP.1.1\": \"Maybe\",\n```\n\nor as an object (which will allow the storage of proof, i.e. definition of yes)\n\n``` coffee\n{\n \"metadata\": {\n \"team\": \"Team A\"\n },\n \"activities\": {\n \"Governance\": {\n \"SM.1.1\": { \"value\", \"Yes\" , \"percentage\": \"50\", \"proof\" : [ \"link to issue or wiki] }\n \"SM.1.4\": \"Yes:30\",\n \"SM.2.2\": \"No\",\n \"SM.2.3\": \"Yes:60\",\n \"CP.1.1\": \"Maybe\",\n```\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/104", @@ -37809,6 +39127,7 @@ "node_id": "MDU6SXNzdWUxNjQzMTIzNTI=", "title": "Add 'partial YES values' formula to schema", "body": "When adding support for partial YES values (see #104) I don't think that all activities will need a percentage value (i.e. 1% to 100%). 
In fact, some might only need 2 or 4 values (25%, 50%, 75% or 100%).\n\nAdd these mappings to the schema file so that we can control what type of data should be provided for a particular activity\n\nThe UI should reflect this\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/107", @@ -37838,6 +39157,7 @@ "node_id": "MDU6SXNzdWUxNjQ4MDgxNjM=", "title": "Add automatic calculation and mapping of Code Coverage", "body": "for example using \n\n```\n#!/bin/bash\n\nmocha --recursive --compilers coffee:coffee-script/register --require coffee-coverage/register-istanbul test\n./node_modules/.bin/istanbul report\nopen ./coverage/lcov-report/index.html\n```\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/123", @@ -37867,6 +39187,7 @@ "node_id": "MDU6SXNzdWUxNjQ4MTgxNTY=", "title": "In Maturity-Models-UI Split dev dependencies from build dependencies", "body": "There is no need to have the test dependencies when just wanting to run gulp https://github.com/OWASP/Maturity-Models-UI\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/124", @@ -37896,6 +39217,7 @@ "node_id": "MDU6SXNzdWUxNjQ4MjIwNzk=", "title": "Move package.json dependencies into API project", "body": "from Maturity-Models to Maturity-Models-API\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/125", @@ -37925,6 +39247,7 @@ "node_id": "MDU6SXNzdWUxNjQ4MzA0OTc=", "title": "Add option to run QA tests first on localhost then on hosted docker", "body": "At the moment the QA tests executed from https://github.com/OWASP/Maturity-Models/blob/master/.travis.yml will detect travis execution and run the tests on deployed docker server\n\n![image](https://cloud.githubusercontent.com/assets/656739/16731278/1c01e84c-476f-11e6-93e6-5d1438a81099.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/127", @@ -37940,9 +39263,9 @@ "repository": 1145, "assignees": [], "labels": [ + 258, 253, - 254, - 258 + 254 ] } }, @@ -37955,6 +39278,7 @@ "node_id": "MDU6SXNzdWUxNjQ4MzM1NDE=", "title": "Add add test for Routes.list_Fixed", "body": "![image](https://cloud.githubusercontent.com/assets/656739/16731688/5a589742-4771-11e6-8eea-7b56b9ba6caf.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/128", @@ -37970,9 +39294,9 @@ "repository": 1145, "assignees": [], "labels": [ + 259, 253, - 254, - 259 + 254 ] } }, @@ -37985,6 +39309,7 @@ "node_id": "MDU6SXNzdWUxNjQ4MzQzMzM=", "title": "Routes.list_Fixed add logic to also map other variables (like project)", "body": "at the moment we are defaulting to the `bsimm` project\n\n![image](https://cloud.githubusercontent.com/assets/656739/16731777/cd45e2fa-4771-11e6-90e8-93c6883cca82.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/129", @@ -38014,6 +39339,7 @@ "node_id": "MDU6SXNzdWUxNjUzMTA3NzE=", "title": "API-Team add better way to handle pretty support and set JSON header", "body": "![image](https://cloud.githubusercontent.com/assets/656739/16803647/714338b0-48ff-11e6-9e04-0784175d88b9.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/135", @@ -38043,6 +39369,7 @@ "node_id": "MDU6SXNzdWUxNjYwOTk0OTQ=", 
"title": "Move .fullWidth into it's own css page", "body": "![image](https://cloud.githubusercontent.com/assets/656739/16917053/df4fd4ca-4cf7-11e6-888c-27ba037b622a.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/142", @@ -38072,6 +39399,7 @@ "node_id": "MDU6SXNzdWUxNjYxMDIzNDg=", "title": "fix watch gulp task - remove hard-coded list", "body": "At the moment there are two places that contain the list of the 'tasks to execute on build'\n\n![image](https://cloud.githubusercontent.com/assets/656739/16917462/9e06ee98-4cf9-11e6-88ae-d2518c2b7e38.png)\n\nThere should only be one\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/143", @@ -38101,6 +39429,7 @@ "node_id": "MDU6SXNzdWUxNjczNDM2Mzc=", "title": "Create email friendly version of edit page", "body": "The idea is to make it easy to email the questions to an Security Champion and get them to reply with the answers\n\nAt the moment the workflow is based on the PDF that can be saved from the http://localhost:3000/view/bsimm/team-aiegy/edit view (since there isn't a live site available to SCs)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/149", @@ -38132,6 +39461,7 @@ "node_id": "MDU6SXNzdWUxNjczNDg3NDI=", "title": "Team table view - show team title ", "body": "instead of the team name\n\n![image](https://cloud.githubusercontent.com/assets/656739/17100542/eedbb034-5266-11e6-94fb-245191d86429.png)\n\nhere is the code to change\n\n![image](https://cloud.githubusercontent.com/assets/656739/17100532/de757cac-5266-11e6-8d45-265176e1923e.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/150", @@ -38161,6 +39491,7 @@ "node_id": "MDU6SXNzdWUxNzQ5NDQ5MDQ=", "title": "Add feature to JsDom-API to capture requests made", "body": "This will make timeout issues easier to debug and help in testing #140 \n\n![image](https://cloud.githubusercontent.com/assets/656739/18230761/8feaceaa-729d-11e6-8a07-780bcf805eea.png)\n2\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/157", @@ -38190,6 +39521,7 @@ "node_id": "MDU6SXNzdWUxNzQ5NDUxODM=", "title": "Fix ng-view contents test", "body": "This tests is currently checking the status of the pages when the menus are clicked and it is not very stable \n\n``` coffee\n # todo: this test is not very stable at the moment,\n # I think https://github.com/OWASP/Maturity-Models/issues/140 is part of the problem\n xit 'check ng-view contents', (done)->\n using jsDom, ->\n links = @.$('.sub-nav a') # get all navigation links\n\n test_Link = (text, url, next)=> # function to test links\n link = links[index++] # get link to test\n link.text.assert_Is text # confirm link text is expected value\n link.click() # click on link\n jsDom.wait_No_Http_Requests =>\n @.$location().url().assert_Is url # confirm location matches expected value\n next()\n\n index = 1 # this is a really weird timing bug since this check doesn't work here\n #test_Link 'bsimm', \"/view/project/#{project}\"\n #test_Link 'view' , \"/view/#{project}/#{team}\"\n #test_Link 'bsimm', \"/view/project/#{project}\" , ->\n test_Link 'table', \"/view/#{project}/#{team}/table\", ->\n\n return done()\n test_Link 'radar', \"/view/#{project}/#{team}/radar\", ->\n test_Link 'edit' , \"/view/#{project}/#{team}/radar\", ->\n wrong_value = 
\"/view/#{project}/#{team}/radar\" #todo: wrong value\n test_Link 'raw' , \"/view/#{project}/#{team}/radar\", ->\n test_Link 'schema' , wrong_value , -> # \"/view/#{project}/#{team}/radar\", ->\n index = 0 # but it works here\n test_Link 'bsimm', wrong_value , -> # \"/view/project/#{project}\" , ->\n done()\n```\n\n![image](https://cloud.githubusercontent.com/assets/656739/18230781/dc421092-729d-11e6-9f80-afba54cf8ab7.png)\n\nRelated to #140 and #157\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/158", @@ -38220,6 +39552,7 @@ "node_id": "MDU6SXNzdWUxODEwOTE2Mzc=", "title": "Upgrade to BSSIM 7", "body": "https://www.bsimm.com/\n![image](https://cloud.githubusercontent.com/assets/656739/19106018/0ab08bbe-8add-11e6-83d0-67679b089dee.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/159", @@ -38248,6 +39581,7 @@ "node_id": "MDU6SXNzdWUxODM5MjI1OTU=", "title": "Add ability to rename teams", "body": "Add support for renaming teams inside a project\n\nThis has some security implications due to path injection on rename value\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/161", @@ -38277,6 +39611,7 @@ "node_id": "MDU6SXNzdWUxODQyNjM3MTE=", "title": "Run ZAP proxy baseline scan against every deployment to Digital ocean", "body": "Start with this one and then evolve it into more custom scans\n\nCheck if these scans show up in logs\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/163", @@ -38292,8 +39627,8 @@ "repository": 1145, "assignees": [], "labels": [ - 249, - 256 + 256, + 249 ] } }, @@ -38306,6 +39641,7 @@ "node_id": "MDU6SXNzdWUxODQ0NDUyOTQ=", "title": "Scores shown in Table view are not updated on save", "body": "after save\n\n![image](https://cloud.githubusercontent.com/assets/656739/19594329/7bdf8410-977c-11e6-9650-62f2e01ea33a.png)\n\nafter refresh\n\n![image](https://cloud.githubusercontent.com/assets/656739/19594342/8ca7a656-977c-11e6-806a-2f667eadd5a3.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/166", @@ -38335,6 +39671,7 @@ "node_id": "MDU6SXNzdWUxODQ1OTM4MDU=", "title": "Fix UI tests naming", "body": "So that they are not mis-aligned \n\n![image](https://cloud.githubusercontent.com/assets/656739/19615037/8510776c-97f2-11e6-8866-7de82cff1175.png)\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/171", @@ -38364,6 +39701,7 @@ "node_id": "MDU6SXNzdWUxODYyMzM3MTI=", "title": "Add support for groups or 'teams of teams'", "body": "This would allow the consolidation of teams data and results \r\n\r\nHandles the case where a particular project will have more than one sub-project which requires a Maturity Model in its own right\r\n\r\nAlso handles the case of submodules used by a particular project (since there is an argument that they should affect the master one)\r\n\r\nOne idea will be to have a global score for the entire project (based on the consolidate number of Yes and No values), which can then be feed into another projects (this would actually scale quite nicely since it would allow a chained of project's dashboards, each one feeding the one above, all the way to a global CISO dashboard)", + "summary": "", "state": "open", "state_reason": "", "url": 
"https://github.com/OWASP/Maturity-Models/issues/175", @@ -38393,6 +39731,7 @@ "node_id": "MDU6SXNzdWUxODYyNTM5NTk=", "title": "Add CI support to auto checkout master (or develop) branch in all submodules", "body": "This will make the process smother and not require regular submodule sync commits on base repo (only doing those commits on release, not on bug fix)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/177", @@ -38408,8 +39747,8 @@ "repository": 1145, "assignees": [], "labels": [ - 252, - 258 + 258, + 252 ] } }, @@ -38422,6 +39761,7 @@ "node_id": "MDU6SXNzdWUxODczMDc1MjM=", "title": "Find way to integrate publish data created with confluence", "body": "This could be a button called 'publish to confluence' which would use an api like https://www.npmjs.com/package/confluence-api to maintain a page in that wiki with BSIMM data from a team\r\n\r\nThe publish mode would also allow for the current 'offline' execution mode, and maybe an Jenkins integration where a job is used to keep data published to private repos (with customer specific data) in sync with confluence\r\n\r\nAnother option (more short term, and practical) is to have a way to easily export and import data from/to confluence\r\n\r\nThat way the security champions would use the confluence to edit the data (with the data manually syncronised, via an import workflow, by the central AppSec team)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/185", @@ -38440,8 +39780,8 @@ ], "labels": [ 249, - 257, - 260 + 260, + 257 ] } }, @@ -38454,6 +39794,7 @@ "node_id": "MDU6SXNzdWUxODc3MzY2NTA=", "title": "Move yes-answers css into its own file", "body": "At the moment it is on activity-table.css file\r\n\r\n![image](https://cloud.githubusercontent.com/assets/656739/20062681/77f43480-a4fc-11e6-81dd-eeb49628cb89.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/191", @@ -38483,6 +39824,7 @@ "node_id": "MDU6SXNzdWUxOTA0NTAzNjE=", "title": "Write server start-up timestamp and git head value to disk and expose on API", "body": "Will help to detect current deployed version and release time", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/194", @@ -38511,6 +39853,7 @@ "node_id": "MDU6SXNzdWUxOTA0NTI0NzM=", "title": "When creating new team set metadata team value to team name", "body": "At the moment the value is empty\r\n\r\n![image](https://cloud.githubusercontent.com/assets/656739/20451085/990eda5e-aded-11e6-9f88-646f857606fc.png)\r\n\r\nthis will create a prob when using the metadata.team value for the view's title", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/195", @@ -38536,10 +39879,11 @@ "pk": 1360, "fields": { "nest_created_at": "2024-09-11T21:32:43.268Z", - "nest_updated_at": "2024-09-11T21:32:43.268Z", + "nest_updated_at": "2024-09-13T16:15:44.290Z", "node_id": "MDU6SXNzdWUxOTYwMzg4MTk=", "title": "Rename project_Get to project_Teams", "body": "And the rest call from /project/get/#{project} to /project/teams/#{project}\r\n\r\n![image](https://cloud.githubusercontent.com/assets/656739/21261362/d5f6ed8a-c382-11e6-810d-49311949c265.png)\r\n\r\nIt makes more sense as a name\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/208", @@ -38571,6 +39915,7 @@ 
"node_id": "MDU6SXNzdWUxOTgyNzI3NzI=", "title": "pass $injector into services", "body": "will make it more robust and less dependent on the order of params\r\n![image](https://cloud.githubusercontent.com/assets/656739/21581958/96775d98-d03e-11e6-9106-e4d1e7140215.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/214", @@ -38599,6 +39944,7 @@ "node_id": "MDU6SXNzdWUyMDQ1NjIyNzk=", "title": "Keep track of user's table field choises", "body": "At the moment when opening multiple teams in different tabs we lose the current settings (for example keeping the 'Proof edit' open\r\n\r\n![image](https://cloud.githubusercontent.com/assets/656739/22505483/a9c87c40-e873-11e6-8bb1-2e573c8025a1.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/218", @@ -38628,6 +39974,7 @@ "node_id": "MDU6SXNzdWUyMDQ1NzE1ODU=", "title": "Create better proofs view", "body": "At the moment the data is shown here\r\n\r\n![image](https://cloud.githubusercontent.com/assets/656739/22506789/3c0b0eb4-e87a-11e6-8a32-686364956496.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/219", @@ -38657,6 +40004,7 @@ "node_id": "MDU6SXNzdWUyMDQ1NzE4MTQ=", "title": "Saving data doesn't update radar view", "body": "![image](https://cloud.githubusercontent.com/assets/656739/22506802/5169d5c4-e87a-11e6-91d3-b6ed3de8c7ec.png)\r\n\r\nTried a couple options but this needs to be done carefully\r\n\r\n![image](https://cloud.githubusercontent.com/assets/656739/22506817/623cc3fc-e87a-11e6-8823-641018972c20.png)\r\n\r\n(as seen above, we need to add a test for multiple saves)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/220", @@ -38686,6 +40034,7 @@ "node_id": "MDU6SXNzdWUyMDU2MzQxOTM=", "title": "Add ability to diff two teams", "body": "Useful for the cases where there is a need to merge two mappings or to do more detailed comparisons between teams", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/222", @@ -38715,6 +40064,7 @@ "node_id": "MDU6SXNzdWUyMDU2MzQ0MjQ=", "title": "Add ability to control the score formula settings from a config file", "body": "Namely the decision to use Maybe values as a YES (on both score and observed)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/223", @@ -38744,6 +40094,7 @@ "node_id": "MDU6SXNzdWUyMDU2Mzk3Nzk=", "title": "Add ability to control the threshold for the observed colors", "body": "![image](https://cloud.githubusercontent.com/assets/656739/22656165/609b8218-ec8a-11e6-91a3-71df496f3b9a.png)\r\n\r\nat the moment they are controlled by this css (which also need refactoring)\r\n\r\n![image](https://cloud.githubusercontent.com/assets/656739/22656200/806cc94e-ec8a-11e6-9ef9-584fbcf9354c.png)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/224", @@ -38769,11 +40120,12 @@ "model": "github.issue", "pk": 1368, "fields": { - "nest_created_at": "2024-09-11T21:32:57.000Z", - "nest_updated_at": "2024-09-11T21:32:57.000Z", + "nest_created_at": "2024-09-11T21:32:57Z", + "nest_updated_at": "2024-09-11T21:32:57Z", "node_id": "MDU6SXNzdWUyMTAyNzgwNDQ=", "title": "Add input validation on the proof ", "body": "After entering huge value in proof field, the page 
kept the status \"saving data ....\". I am not sure if the value was rejected on the server side and didn't update the page status or if the huge value caused some crash.\r\nlink:\r\nhttp://138.68.145.52/view/samm/team-E/table\r\nkey:\r\nSM.3.A\r\n\r\nIt would be beneficial to add validation on this field and show the proper message to the user.\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/225", @@ -38789,8 +40141,8 @@ "repository": 1145, "assignees": [], "labels": [ - 252, - 256 + 256, + 252 ] } }, @@ -38803,6 +40155,7 @@ "node_id": "MDU6SXNzdWUyMTAyNzg0MjQ=", "title": "Export reports ", "body": "Add the ability to export the radar or score reports per team. or per organization into PDF.\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/226", @@ -38829,6 +40182,7 @@ "node_id": "MDU6SXNzdWUyMTAyNzg2NzA=", "title": "More search filters", "body": "Add more flexible search filters especially in the following pages:\r\n\r\nSchema details\r\nhttp://138.68.145.52/view/project/samm/schema-details\r\nObserved details\r\nhttp://138.68.145.52/view/project/samm/observed-details", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/227", @@ -38858,6 +40212,7 @@ "node_id": "MDU6SXNzdWUyMTAyNzg4MjM=", "title": "Show indication of unanswered questions quantity", "body": "Add quick link or shortcuts to show and jump to the remaining unanswered (pending) activities questions.\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/228", @@ -38884,6 +40239,7 @@ "node_id": "MDU6SXNzdWUyNDMxNzQyNzA=", "title": "Team data not shown", "body": "I exported the data from the Generic DevOps Security Maturity Model in a data project https://github.com/wurstbrot/genericDevOpsSecurityMaturityModel-Data with the relevant json files.\r\n\r\nI added level 1 till 3 as an example for the team data, but the data is not shown in the radar. What am I doing wrong?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/232", @@ -38910,6 +40266,7 @@ "node_id": "MDU6SXNzdWU0MDU4NDE3NDU=", "title": "Blank View when running app using npm", "body": "Blank View when running `npm run dev` the application runs without errors. I can see the application with the top menu (Maturity Models, Projects, API), but the view below just show a number (e.g. 29). 
Is there a step I'm missing to see the information here?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Maturity-Models/issues/234", @@ -38936,6 +40293,7 @@ "node_id": "MDU6SXNzdWUxMjcwNTAyNjM=", "title": "Add support for find security bugs.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/15", @@ -38965,6 +40323,7 @@ "node_id": "MDU6SXNzdWUxMjcwNTAyNzI=", "title": "Add support for PMD.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/16", @@ -38994,6 +40353,7 @@ "node_id": "MDU6SXNzdWUxMjcwNTAyODI=", "title": "Add support for gauntlt.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/17", @@ -39023,6 +40383,7 @@ "node_id": "MDU6SXNzdWUxMzMyMzUwNDQ=", "title": "Question: With more tools being added to a single container is ther concern for image bloat?", "body": "Looks like this is going to be very bulky, and would take a significant amount of time to pull down. 0.8.0 is already around pushing 1 GB.\n\nWhat about maintaining a number of containers for each of the features. They would all be components of the pipeline, and then I would be able to choose what I need. Eg... having node tools is great, but only if I need to test NodeJs. \n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/22", @@ -39052,6 +40413,7 @@ "node_id": "MDU6SXNzdWUxMzY3MjIzMzU=", "title": "Consider checking for PHP malware", "body": "https://github.com/nbs-system/php-malware-finder\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/23", @@ -39081,6 +40443,7 @@ "node_id": "MDU6SXNzdWUxMzc5NjAwMTM=", "title": "add support for Snyk", "body": "http://snyk.io\n\nFrom: https://github.com/groupon/codeburner/issues/1\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/27", @@ -39110,6 +40473,7 @@ "node_id": "MDU6SXNzdWUxNzcwMzUyMzg=", "title": "Document and Test Jenkins Integration (Plugin?)", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/30", @@ -39127,9 +40491,9 @@ 273 ], "labels": [ + 264, 261, - 263, - 264 + 263 ] } }, @@ -39142,6 +40506,7 @@ "node_id": "MDU6SXNzdWUxNzcwMzUzMzQ=", "title": "Document and update JIRA integration.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/31", @@ -39159,8 +40524,8 @@ 273 ], "labels": [ - 261, - 264 + 264, + 261 ] } }, @@ -39173,6 +40538,7 @@ "node_id": "MDU6SXNzdWUxNzcwMzU0Mzg=", "title": "Document and update github issues integration.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/32", @@ -39188,8 +40554,8 @@ "repository": 1151, "assignees": [], "labels": [ - 261, - 264 + 264, + 261 ] } }, @@ -39202,6 +40568,7 @@ "node_id": "MDU6SXNzdWUxNzcwMzU2Mzc=", "title": "Integrate with ZAP via API.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/33", @@ -39231,6 +40598,7 @@ "node_id": "MDU6SXNzdWUxNzgxMTA4MjM=", "title": "Consider looking at S3 bucket contents and catching change over time.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/34", @@ -39260,6 +40628,7 @@ "node_id": 
"MDU6SXNzdWUxODg5Mzc0OTE=", "title": "Consider Sample Integration with Drone.io Pipeline", "body": "https://github.com/drone/drone\r\nDrone is a CI/CD solution that aims to be a lightweight replacement for jenkins. It is written in Golang uses Docker containers to run builds. Builds are run based on a single instruction file in `drone.yml`\r\n\r\nv0.5 of drone.io includes the concept of a `pipeline` which expands drones support of [services](http://readme.drone.io/0.5/usage/services/) which are docker containers that are accessable to the build localhost, samples include a selenium/selenium grid which could be used to proxy auth for OWASP zap, carry out any future Gauntlt tasks, or provide a storage service to upload the output of the tests to a non-emphemeral external service like s3 or DB", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/37", @@ -39289,6 +40658,7 @@ "node_id": "MDU6SXNzdWUxODk1MzIzMTY=", "title": "Add S3 mounter.", "body": "Make it easy to use Glue to run AV and DLP (hashdeep) against an S3 bucket.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/38", @@ -39319,6 +40689,7 @@ "node_id": "MDU6SXNzdWUxOTY4OTA3Nzc=", "title": "Getting Segmentation fault error", "body": "owasp-pipeline-0.8.7/lib/pipeline/util.rb:13: [BUG] Segmentation fault at 0x00000000000000\r\nruby 2.3.0p0 (2015-12-25 revision 53290) [x86_64-linux]\r\n\r\n-- Control frame information -----------------------------------------------\r\nc:0003 p:---- s:0008 e:000007 CFUNC :gets\r\nc:0002 p:0036 s:0005 e:000004 BLOCK /usr/local/rvm/gems/ruby-2.3.0/gems/owasp-pipeline-0.8.7/lib/pipeline/util.rb:13 [FINISH]\r\nc:0001 p:---- s:0002 e:000001 (none) [FINISH]\r\n\r\n-- Ruby level backtrace information ----------------------------------------\r\n/usr/local/rvm/gems/ruby-2.3.0/gems/owasp-pipeline-0.8.7/lib/pipeline/util.rb:13:in `block (2 levels) in runsystem'\r\n/usr/local/rvm/gems/ruby-2.3.0/gems/owasp-pipeline-0.8.7/lib/pipeline/util.rb:13:in `gets'\r\n\r\n-- Machine register context ------------------------------------------------\r\n RIP: 0x00007f9c8fed6e21 RBP: 0x00000000000000b0 RSP: 0x00007f9c3827ffc0\r\n RAX: 0x00007f9c3828004f RBX: 0x00007f9c2c000020 RCX: 0x0000000000000007\r\n RDX: 0x00000000000000b0 RDI: 0x00007f9c2c000020 RSI: 0x00000000000136a0\r\n R8: 0x0000000000000000 R9: 0x6f6e203a766e652f R10: 0x0000000000000000\r\n R11: 0x0000000000000206 R12: 0x00007f9c2c47cb00 R13: 0x000000000000000b\r\n R14: 0x00007f9c2c000078 R15: 0x0000000000002710 EFL: 0x0000000000010283\r\n\r\n-- C level backtrace information -------------------------------------------\r\n\r\n\r\nSystem configuration:\r\n ubuntu 14.04, 4 core ,16GB RAM,Ruby 2.3.0,Rails 4.2.5\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/42", @@ -39334,9 +40705,9 @@ "repository": 1151, "assignees": [], "labels": [ + 265, 261, - 262, - 265 + 262 ] } }, @@ -39349,6 +40720,7 @@ "node_id": "MDU6SXNzdWUxOTkzNzg5MzI=", "title": "Passing authMethodName/authMethodConfigParams to ZAP", "body": "Is there a way to pass these arguments to ZAP through glue to authenticate with a website you're testing against? 
\r\n\r\nhttps://github.com/zaproxy/zaproxy/wiki/FAQformauth", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/43", @@ -39364,9 +40736,9 @@ "repository": 1151, "assignees": [], "labels": [ + 266, 261, - 262, - 266 + 262 ] } }, @@ -39379,6 +40751,7 @@ "node_id": "MDU6SXNzdWUyMDA4NzI2MTA=", "title": "Managing Secrets", "body": "Add support for pulling config / secrets from tiers of locations: \r\n0. Defaults\r\n1. CLI\r\n2. ENV\r\n3. Vault\r\n\r\nBasically, when run, set defaults, apply CLI options, then read from ENV, then pull from Vault if configured.\r\n\r\nThe purpose is to make it so that it can all work without putting secrets in stored CLI in Jenkins (for example).", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/45", @@ -39408,6 +40781,7 @@ "node_id": "MDU6SXNzdWUyMjYzMDA2MDQ=", "title": "CSV Reports Don't Escape Properly", "body": "Some of the columns in the CSV contain commas, which makes CSV a fairly inappropriate choice for output format.\r\n\r\nSuggestion: consider removing CSV support.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/59", @@ -39423,9 +40797,9 @@ "repository": 1151, "assignees": [], "labels": [ + 265, 261, - 262, - 265 + 262 ] } }, @@ -39438,6 +40812,7 @@ "node_id": "MDU6SXNzdWUyMjY2OTgzMzQ=", "title": "HTML Report Format", "body": "It would be useful for Glue to optionally output HTML and possibly other human-consumable formats.\r\n\r\nThe use case is Glue running from Jenkins. Sometimes, in lieu of pushing directly to Jira, it would be useful to just email formatted results directly to stakeholders.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/61", @@ -39468,6 +40843,7 @@ "node_id": "MDU6SXNzdWUyNTE4ODMzNDc=", "title": "Use nmap to test for weak ssl ciphers", "body": "Hey\r\nNMap can be used to test for weak ssl ciphers using:\r\n`nmap --script ssl-cert,ssl-enum-ciphers -p 443 -oX output.xml host`\r\nIt will be nice to run it from glue and process the XML so Glue will be able to report on weak ciphers (nmap rank them, so we can report on each cipher below A grade, and have the minimum grade configurable).\r\nI've created a little script that does exactly that - you can find the code [here](https://github.com/omerlh/test-ssl-cipher-suites). It is in Ruby, so it will be very easy to move the code into Glue.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/81", @@ -39497,6 +40873,7 @@ "node_id": "MDU6SXNzdWUyNTMyMzgwNjA=", "title": "Test for fingerprinting headers", "body": "There are a few fingerprinting headers, for example: \r\n* X-Powered-By\r\n* X-AspNet-Version\r\n* X-AspNetMvc-Version\r\n\r\nThat expose information about the server. 
We can easily add a test for those headers using `curl` and `grep`, and report (if found) on those headers.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/82", @@ -39512,9 +40889,9 @@ "repository": 1151, "assignees": [], "labels": [ + 267, 261, - 263, - 267 + 263 ] } }, @@ -39527,6 +40904,7 @@ "node_id": "MDU6SXNzdWUyNzczODc5OTE=", "title": "Languages supported by glue", "body": "Hi,\r\n\r\nWhat are the languages supported by glue at this time?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/97", @@ -39542,8 +40920,8 @@ "repository": 1151, "assignees": [], "labels": [ - 261, - 266 + 266, + 261 ] } }, @@ -39556,6 +40934,7 @@ "node_id": "MDU6SXNzdWUzMTkyOTI0MTg=", "title": "Add support for plugins", "body": "Allow to extend glue with plugin, so supporting of new tools, filters or reporters will not require changing code in Glue.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/107", @@ -39585,6 +40964,7 @@ "node_id": "MDU6SXNzdWUzMjE2NzgyMzM=", "title": "Add dummy task ", "body": "Create a simple dummy task, to make it easier to play with Glue and understand it's features. The task should generate some findings, with different levels.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/113", @@ -39600,8 +40980,8 @@ "repository": 1151, "assignees": [], "labels": [ - 261, - 267 + 267, + 261 ] } }, @@ -39614,6 +40994,7 @@ "node_id": "MDU6SXNzdWUzMjE2ODcyOTk=", "title": "Define process for doing CI builds ", "body": "Produce:\r\n* ruby gem\r\n* docker image\r\n* GitHub release", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/115", @@ -39631,8 +41012,8 @@ 271 ], "labels": [ - 261, - 264 + 264, + 261 ] } }, @@ -39645,6 +41026,7 @@ "node_id": "MDU6SXNzdWUzMjE2ODgwNTY=", "title": "Write criteria for including or not including a tool", "body": "To be used as we review, document and test each new tool. (Eg. Which should be removed?)", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/117", @@ -39660,8 +41042,8 @@ "repository": 1151, "assignees": [], "labels": [ - 261, - 264 + 264, + 261 ] } }, @@ -39674,6 +41056,7 @@ "node_id": "MDU6SXNzdWUzMjE2ODgxMDU=", "title": "Document how to write a filter.", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/118", @@ -39689,9 +41072,9 @@ "repository": 1151, "assignees": [], "labels": [ - 261, 264, - 267 + 267, + 261 ] } }, @@ -39704,6 +41087,7 @@ "node_id": "MDU6SXNzdWUzMjE2ODgxNzk=", "title": "Document ZAP", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/119", @@ -39719,9 +41103,9 @@ "repository": 1151, "assignees": [], "labels": [ - 261, 264, - 267 + 267, + 261 ] } }, @@ -39734,6 +41118,7 @@ "node_id": "MDU6SXNzdWUzMjE2ODg0ODI=", "title": "Work to define \"rightsized\" level of testing ", "body": "Write that down in a markdown file. 
Will be collaboration across the team but need someone to take point and write it up", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/120", @@ -39749,8 +41134,8 @@ "repository": 1151, "assignees": [], "labels": [ - 261, - 264 + 264, + 261 ] } }, @@ -39763,6 +41148,7 @@ "node_id": "MDU6SXNzdWUzMjM0NTAwNDY=", "title": "Glue Dependencies", "body": "Hi,\r\n\r\nWhat are the glue dependencies for running as a standalone? the ones listed are for the tools it supports as far as i've seen.\r\n\r\nDoes it have any ruby gem dependencies?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/123", @@ -39778,8 +41164,8 @@ "repository": 1151, "assignees": [], "labels": [ - 261, - 266 + 266, + 261 ] } }, @@ -39792,6 +41178,7 @@ "node_id": "MDU6SXNzdWUzNjk4MTc0NDk=", "title": "Add github issue reporter.", "body": "Add a reporter that will take each finding and create a GitHub issue for it.\r\n\r\nUse the GitHub API.\r\n\r\nNeed to figure out auth. Probably do it with app token.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/138", @@ -39821,6 +41208,7 @@ "node_id": "MDU6SXNzdWUzOTI1NDUyOTI=", "title": "Add support for good findings", "body": "zaproxy/zaproxy#5155\r\nIdeally, security tools should be able to report also good findings. Glue needs to be able to support these findings too, and report them properly. For example, as passed tests in TeamCity formater.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/144", @@ -39849,6 +41237,7 @@ "node_id": "MDU6SXNzdWU0MjA2ODE2NDY=", "title": "GitMounter won't work with Azure Devops/VisualStudio Team Services repositories", "body": "GitMounter checks if last characters from the URL are '.git' to be enabled\r\n\r\nGit repositories from Azure Devops can be cloned using URLs like\r\n\r\n* ```https://dev.azure.com///_git/```\r\n* ```https://.visualstudio.com//_git/```\r\n\r\n```git clone``` will work with this URLs as long as Personal Access Token (PAT) is provided\r\n\r\n```git clone https://DUMMY:@dev.azure.com/MYORG/MYPROJECT/_git/MYREPO```\r\n\r\nSo I think maybe improve GitMounter is needed or just create a Mounter for Azure Devops repositories?\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/149", @@ -39877,6 +41266,7 @@ "node_id": "MDU6SXNzdWU0MjE2MDUwNDg=", "title": "Trufflehog severity hardcoded to 4", "body": "After running trufflehog task, it was skipped because severity of findings are hardcoded to 4 with ISSUE_SEVERITY at https://github.com/OWASP/glue/blob/master/lib/glue/tasks/trufflehog.rb#L10\r\n\r\n```\r\ncode - Trufflehog - #\r\nTrufflehog\r\nProblem running Trufflehog ... skipped.\r\nSeverity should be between 1 to 3, not 4\r\nSeverity should be between 1 to 3, not 4\r\n```", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/151", @@ -39905,6 +41295,7 @@ "node_id": "MDU6SXNzdWU0MjMxODU0MDU=", "title": "Docker ZAP integrated with OWASP Glue", "body": "Hi Omer, I've done all the steps from https://github.com/OWASP/glue/blob/master/docs/dynamic_task.md#zaproxy.\r\n\r\nI have a report in txt format, but I have a question on how to remove false-positives from this report ? Can I create a glue.json file with false-positives ?\r\n\r\nMy idea is ts to do something like this : \r\n1. 
modify ZAP Docker image (adding bash script with curl command - request for ZAP API, response with report in json format, save report on local disk)\r\n2. step by step : https://github.com/OWASP/glue/blob/master/docs/dynamic_task.md#zaproxy.\r\n3. create glue.json file with false-positives \r\n4. run command : ruby /bin/glue --finding-file-path \r\n\r\nWhat do you think about this solution ?\r\n\r\n[output.txt](https://github.com/OWASP/glue/files/2987763/output.txt)\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/152", @@ -39933,6 +41324,7 @@ "node_id": "MDU6SXNzdWU0MjYxMTIyNTg=", "title": "Glue Does Not Send Api Key to Zap In Header", "body": "Hey,\r\n\r\n**Summary**\r\n\r\nTrying to use the Glue task for ZAP as the next stage in my security testing pipeline however I am coming across an issue which seems to kill it.\r\n\r\n**Issue Found**\r\n\r\nGlue sends the API key to Zap as an HTTP query parameter instead of in the header which it expects. This causes Zap to throw an error like this:\r\n\r\n```\r\nProvided parameter has illegal or unrecognized value (illegal_parameter) : &apikey=myapikey&contextName=b67b6ff7-de5b-4094-9cd6-0983cd21ec9c\r\n\tat org.zaproxy.zap.extension.api.API.getParams(Unknown Source)\r\n\tat org.zaproxy.zap.extension.api.API.handleApiRequest(Unknown Source)\r\n\tat org.parosproxy.paros.core.proxy.ProxyThread.processHttp(Unknown Source)\r\n\tat org.parosproxy.paros.core.proxy.ProxyThread.run(Unknown Source)\r\n\tat java.lang.Thread.run(Thread.java:748)\r\n```\r\n\r\n**Reproduction Steps**\r\n\r\nAssuming you have Zap version 2.6.0 running in api mode you can recreate this issue using:\r\n\r\n```\r\ndocker run -i owasp/glue:raw-latest bin/glue -a my-api --target https://mytarget.net -t zap --zap-host http://0.0.0.0 --zap-port 8090 --zap-api-token myapikey\r\n```\r\n\r\nWhich will cause a log output of:\r\n\r\n```\r\nLoading scanner...\r\nLogfile nil?\r\ncalling scan\r\nRunning scanner\r\nMounting https://mytarget.net with #\r\nMounted https://mytarget.net with #\r\nProcessing target...https://mytarget.net\r\nRunning tasks in stage: wait\r\nRunning tasks in stage: mount\r\nRunning tasks in stage: file\r\nRunning tasks in stage: code\r\nRunning tasks in stage: live\r\nlive - Zap - #\r\n\r\nRunning tasks in stage: done\r\nRunning base report...\r\n\r\nRunning ZAP on: https://mytarget.net from http://0.0.0.0:8090 with b67b6ff7-de5b-4094-9cd6-0983cd21ec9c\r\n\r\n```\r\n\r\nAt this stage, the process dies due to the request sent to Zap including the Zap API Key in HTTP query param instead of the header.\r\n\r\n**Docker Image Versions Used**\r\n\r\n**Zap**: owasp/zap2docker-bare:2.6.0\r\n**Glue**: owasp/glue:raw-latest\r\n\r\nIf there is any more info I can provide to help figure the issue out let me know.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/154", @@ -39961,6 +41353,7 @@ "node_id": "MDU6SXNzdWU0Mjg0ODM0NTQ=", "title": "Duplicate tasks in JIRA", "body": "**Description**\r\nTasks are created in the JIRA system, but the filter that eliminates duplication does not work. 
Duplicates are created because the jql query is incorrect and incorrectly verified by the \"fingerprint\".\r\n\r\n**Proposed solution**\r\nI found a working solution.\r\nhttps://community.atlassian.com/t5/Jira-questions/CONTAINS-does-not-seem-to-work-when-searching-description/qaq-p/408602\r\nI tested this solution on local machine and it's working.\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/157", @@ -39989,6 +41382,7 @@ "node_id": "MDU6SXNzdWU0NDE0Mzg5NTE=", "title": "retire.js finds results but then errors, result file empty", "body": "Ran just retirejs scan on a project. The screen showed retire.js finding some issues but then hitting an error. unfortunately the glue output json was just [] So if you were just processing the output it would probably not indicate there were any errors.\r\n\r\nLog shows\r\nRetireJS scanning: /mnt/project\r\nMissing version for popper.js. Need to run npm install ?\r\nRetire JSON Raw Results: [{ A BUNCH OF RESULTS HERE }]\r\nProblem running RetireJS\r\n#\r\n\r\nAnd like I mentioned earlier the output .json file contains just []", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/160", @@ -40017,6 +41411,7 @@ "node_id": "MDU6SXNzdWU0NjQ2Nzk2MzU=", "title": "ZAP failing with: Supported versions = 2.4.0 and up - got 2.8.0", "body": "For both docker and installed glue fails to use ZAP:\r\n\r\n```\r\n~# docker run --network=\"host\" owasp/glue -t zap --zap-host http://127.0.0.1 --zap-port 8080 http://127.0.0.1:3000\r\nLoading scanner...\r\nLogfile nil?\r\ncalling scan\r\nRunning scanner\r\nMounting http://127.0.0.1:3000 with #\r\nMounted http://127.0.0.1:3000 with #\r\nProcessing target...http://127.0.0.1:3000\r\nRunning tasks in stage: wait\r\nRunning tasks in stage: mount\r\nRunning tasks in stage: file\r\nRunning tasks in stage: code\r\nRunning tasks in stage: live\r\nInstall ZAP from owasp.org and ensure that the configuration to connect is correct. Supported versions = 2.4.0 and up - got 2.8.0\r\nRunning tasks in stage: done\r\nRunning base report...\r\n\r\n\r\n~/glue# ruby bin/glue -t zap --zap-host http://localhost --zap-port 8080 http://localhost:3000\r\nLogfile nil?\r\ncalling scan\r\nRunning scanner\r\nLoading scanner...\r\nMounting http://127.0.0.1:3000 with #\r\nMounted http://127.0.0.1:3000 with #\r\nProcessing target...http://127.0.0.1:3000\r\nRunning tasks in stage: wait\r\nRunning tasks in stage: mount\r\nRunning tasks in stage: file\r\nRunning tasks in stage: code\r\nRunning tasks in stage: live\r\nInstall ZAP from owasp.org and ensure that the configuration to connect is correct. Supported versions = 2.4.0 and up - got 2.8.0\r\nRunning tasks in stage: done\r\nRunning base report...\r\n```", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/161", @@ -40045,6 +41440,7 @@ "node_id": "MDU6SXNzdWU0NzUwNTI1NTY=", "title": "Add a PDF reporter", "body": "I'm currently working on a PDF reporter that uses an ERB template to output an html, and then call `wkhtmltopdf` for rendering the final file. The main reason for me was to create a nice output for the report to send via the Slack reporter, in case the amount of issues found was large.\r\n\r\nThe changes should be relatively easy to merge, the only new dependency would be `wkhtmltopdf`. 
\r\n\r\nI'm open to discussion if anyone wants to propose a better solution", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/166", @@ -40073,6 +41469,7 @@ "node_id": "MDU6SXNzdWU0NzkyNzIyODA=", "title": "Problem with JIRA reporter in the internal corporate network ", "body": "**What ?**\r\nAt the attempt to connect JIRA and reporting security bugs - an error message appears:\r\n\r\n`SSL_connect returned=1 errno=0 state=error: certificate verify failed`\r\nJIRA is started in the enterprise network, and the certificate is signed through internal corporate CA.\r\nRootCA + InterCA was added to the docker container, but still \"certificate verify failed\".\r\n\r\n**I checked on JIRA Cloud - everything is works correctly.**\r\n\r\nIn the certificate:\r\nCN (commonName) = jira.company.com, but SAN (DNS Name) = *. company.com\r\n\r\nPerhaps it is a problem?\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/167", @@ -40101,6 +41498,7 @@ "node_id": "MDU6SXNzdWU0ODUxNTY4Nzk=", "title": "Initiate Checkmarx Scan ", "body": "Hey,\r\n\r\nI'm trying to initiate a Checkmarx scan, running the docker image with the following\r\n\r\n-t checkmarx\r\n--checkmarx-user\r\n--checkmarx-password\r\n--checkmarx-server\r\n--checkmarx-project\r\n\r\nI added runCxConsole.sh to the Path Variable\r\n\r\n\"GlueCheckmarxOutput\"\r\n\r\nWhat am I missing?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/168", @@ -40129,6 +41527,7 @@ "node_id": "MDU6SXNzdWU0OTE2NDQyNDY=", "title": "--exclude not working with SFL?", "body": "I'm running the following command, attempting to ignore `node_modules` but I can't quite seem to make it work. Can someone please offer advice?\r\n\r\n```\r\n\tdocker run --rm --name=Glue \\\r\n\t\t-v $(PWD):/tmp/triage owasp/glue \\\r\n\t\t-t sfl \\\r\n\t\t-t retirejs \\\r\n\t\t-t nodesecurityproject \\\r\n\t\t-f teamcity \\\r\n\t\t--teamcity-min-level 1 \\\r\n\t\t--exclude node_modules \\\r\n\t\t--finding-file-path /tmp/triage/glue_ignore.json \\\r\n\t\t--debug \\\r\n\t\t/tmp/triage\r\n```\r\n\r\nGlue runs normally, and my `--finding-file-path` correctly ignores results... but I'd like to always ignore anything found in `node_modules` since I don't control everything my dependencies drag in. Currently SFL is flagging like 30+ files from that directory.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/171", @@ -40157,6 +41556,7 @@ "node_id": "MDU6SXNzdWU1MzExNTUyMDM=", "title": "Slack reporter sending message without attachment", "body": "What ?\r\nSlack reporter sending message without attachment.\r\n\r\nUsing command: `sudo docker run --rm -i --mount type=bind,source=\"$(pwd)\",target=/glue/wrk/ owasp-glue ruby bin/glue -f slack --slack-token xoxb-TOKEN --slack-channel owasp-glue-test -t Dynamic -T /glue/wrk/output.json --mapping-file zaproxy --finding-file-path /glue/wrk/glue.json -d\r\n`\r\n\r\nOn Slack Channel message : \r\n\r\n**OWASP Glue test run completed - See attachment.**\r\n\r\nFrom console : \r\n\r\nLoading scanner...\r\nLogfile nil?\r\ncalling scan\r\nRunning scanner\r\nMounting ... /glue/wrk/output.json\r\nMounting target: /glue/wrk/output.json\r\nChecking about mounting /glue/wrk/output.json with #\r\nIn Docker mounter, target: /glue/wrk/output.json became: ut.json ... 
wondering if it matched .docker\r\nChecking about mounting /glue/wrk/output.json with #\r\nChecking about mounting /glue/wrk/output.json with #\r\nChecking about mounting /glue/wrk/output.json with #\r\nProcessing target.../glue/wrk/output.json\r\nRunning tasks in stage: wait\r\nRunning tasks in stage: mount\r\nRunning tasks in stage: file\r\nRunning tasks in stage: code\r\ncode - Dynamic - #\r\nRunning tasks in stage: live\r\nRunning tasks in stage: done\r\nStarting Contrast Severity Filter\r\nMinimum: \r\nNo minimum found, skipping filter.\r\nHave 24 items pre ZAP filter.\r\nHave 24 items post ZAP filter.\r\nchannel=CQRJCQLRH, message=bot_id=BQV5AC325, subtype=bot_message, text=OWASP Glue test run completed - See attachment., ts=1575303887.000200, type=message, username=bot, ok=true, ts=1575303887.000200\r\nowasp-glue\r\n\r\n\r\n**Why is there no attachment in the message? Maybe the command is incorrect?**", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/173", @@ -40185,6 +41585,7 @@ "node_id": "MDU6SXNzdWU1MzczNDkzMjA=", "title": "missing code, file, binary and certificate analysis from mapping file of mobsf", "body": "This is in reference to https://github.com/OWASP/glue/blob/master/lib/glue/mappings/mobsf.json\r\nCould you please provide key for formatting code_analysis and certificate analysis as well? Currently it is just providing manifest analysis only.\r\nFYI... Latest MobSF V3 API changed the \"app_name\": \"name\" to \"app_name\": \"app_name\" and \"key\": \"manifest\", to \"key\": \"manifest_analysis\".\r\n\r\nI drafted very vague mapping file for mobsf. Could you please correct this? Or could you provide an update mapping file would be great?\r\n\r\n{ \"task_name\": \"MobSF\", \"app_name\": \"app_name\", \"mappings\": [ { \"key\": \"manifest_analysis\", \"properties\": { \"description\": \"desc\", \"detail\": \"title\", \"source\": \"title\", \"severity\": \"stat\", \"fingerprint\": \"title\" } \"key\": \"certificate_analysis\", \"properties\": { \"description\": \"description\", \"detail\": \"certificate_info\", \"severity\": \"certificate_status\", \"fingerprint\": \"description\" } \"key\": \"binary_analysis\", \"properties\": { \"description\": \"desc\", \"detail\": \"title\", \"source\": \"file\", \"severity\": \"stat\", \"fingerprint\": \"title\" } \"key\": \"code_analysis\", \"properties\": { \"description\": \"desc\", \"detail\": \"owasp\", \"source\": \"path\", \"fingerprint\": \"owasp\" } \"key\": \"manifest_analysis\", \"properties\": { \"description\": \"android_api\" } } ] }", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/175", @@ -40209,10 +41610,11 @@ "pk": 1418, "fields": { "nest_created_at": "2024-09-11T21:34:41.853Z", - "nest_updated_at": "2024-09-11T21:34:41.853Z", + "nest_updated_at": "2024-09-13T16:16:03.934Z", "node_id": "MDU6SXNzdWU1Mzk0MDA2MTU=", "title": "`block (2 levels) in get_options': undefined local variable or method `path' for Glue::Options:Module (NameError)", "body": "Unable to run glue locally.\r\n\r\n`$ glue -T report.json \r\n\r\n/Library/Ruby/Gems/2.3.0/gems/owasp-glue-0.9.0/lib/glue/options.rb:49:in `block (2 levels) in get_options': undefined local variable or method `path' for Glue::Options:Module (NameError)\r\n\tfrom /System/Library/Frameworks/Ruby.framework/Versions/2.3/usr/lib/ruby/2.3.0/optparse.rb:1578:in `block in parse_in_order'\r\n\tfrom 
/System/Library/Frameworks/Ruby.framework/Versions/2.3/usr/lib/ruby/2.3.0/optparse.rb:1534:in `catch'\r\n\tfrom /System/Library/Frameworks/Ruby.framework/Versions/2.3/usr/lib/ruby/2.3.0/optparse.rb:1534:in `parse_in_order'\r\n\tfrom /System/Library/Frameworks/Ruby.framework/Versions/2.3/usr/lib/ruby/2.3.0/optparse.rb:1528:in `order!'\r\n\tfrom /System/Library/Frameworks/Ruby.framework/Versions/2.3/usr/lib/ruby/2.3.0/optparse.rb:1620:in `permute!'\r\n\tfrom /System/Library/Frameworks/Ruby.framework/Versions/2.3/usr/lib/ruby/2.3.0/optparse.rb:1642:in `parse!'\r\n\tfrom /Library/Ruby/Gems/2.3.0/gems/owasp-glue-0.9.0/lib/glue/options.rb:257:in `get_options'\r\n\tfrom /Library/Ruby/Gems/2.3.0/gems/owasp-glue-0.9.0/lib/glue/options.rb:16:in `parse!'\r\n\tfrom /Library/Ruby/Gems/2.3.0/gems/owasp-glue-0.9.0/bin/glue:11:in `'\r\n\tfrom /usr/local/bin/glue:22:in `load'\r\n\tfrom /usr/local/bin/glue:22:in `
'\r\n\r\n$ `", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/glue/issues/176", @@ -40237,10 +41639,11 @@ "pk": 1419, "fields": { "nest_created_at": "2024-09-11T21:34:47.543Z", - "nest_updated_at": "2024-09-11T21:34:47.543Z", + "nest_updated_at": "2024-09-13T16:16:08.286Z", "node_id": "MDU6SXNzdWU2Mjc5ODkyNjQ=", "title": "vbscan does not show any information", "body": "cmd = perl vbscan.pl url.com\r\nno output, just says your reports: reports//\r\ngoing to the report files created shows a blank txt file and blank html\r\nPerl version: 5.30.1\r\nuname -an = Darwin root.local 19.4.0 Darwin Kernel Version 19.4.0: Wed Mar 4 22:28:40 PST 2020; root:xnu-6153.101.6~15/RELEASE_X86_64 x86_64", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/vbscan/issues/20", @@ -40267,6 +41670,7 @@ "node_id": "MDU6SXNzdWU5NzkxOTcxMDA=", "title": "Adaptation", "body": "Hello man. You can adapt it to the latest version perl?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/vbscan/issues/21", @@ -40293,6 +41697,7 @@ "node_id": "MDU6SXNzdWUxNjUzOTM3OTA=", "title": "Pressing TAB at file_to_execute", "body": "This happens\n\nhttps://s32.postimg.org/5nonvgrvp/Screen_Shot_2016_07_13_at_11_21_05_PM.png\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/68", @@ -40322,6 +41727,7 @@ "node_id": "MDU6SXNzdWUxNjYyMzA4NTQ=", "title": "software not exit the loop if find a null in shellcode", "body": "hello friends,\n\nI release that when we use a specific value for encoding shellcodes example( sub/xor/add_yourvalue ) it could make a null and there is `if` in script it tries again if find `\\x00` in software.\n\nfor example run this command `zsc.py -p windows_x86/exec/xor_0x41414141 -i calc.exe`, we need to check and if there is a null, return an error, also there is more, inc and dec also could make the nulls, \n\n`'%x'%(int('0x4f5ec401',16) - int('0x1',16))\n'4f5ec400'`\n\nor\n\n```\n '%x'%(int('0x4f5ec4ff',16) + int('0x1',16))\n'4f5ec500'\n```\n\nwe need to return error and tell user this value make nulls or change value +1 or -1 \n\n@Pratik151 please notice that until you adding encoder and we not fix it \n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/80", @@ -40350,6 +41756,7 @@ "node_id": "MDU6SXNzdWUxODUzOTYzODQ=", "title": "No tab completion when calling ZSC from different directory", "body": "When calling ZSC from the same directory, tab completion works fine.\n\ne.g.\n`$ python zsc.py`\n\nHowever, if I call ZSC from a different directory then the tab completion inserts tabs rather than doing a completion.\n\ne.g.\n`$ python OWASP-ZSC/zsc.py`\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/110", @@ -40378,6 +41785,7 @@ "node_id": "MDU6SXNzdWUxODU0MTgwMTg=", "title": "shell-storm search changes contexts", "body": "When I want to download a payload from shell-storm through the ZSC menu, I type \"shellcode\" (\"zsc/shellcode>\") and then \"search\" (\"keyword_to_search>\") and then my keyword to search for. After this search I find the payload that I want but my context has changed back to the root (\"zsc>\"). 
I have to type \"shellcode\" (\"zsc/shellcode>\") to get back to where I download the payload.\n\n![screen shot 2016-10-26 at 10 53 56 am](https://cloud.githubusercontent.com/assets/10089864/19731157/883d5592-9b6a-11e6-9e66-db58063f4c98.jpg)\n\nThe context should not change back after the search. It should instead stay in the \"zsc/shellcode>\" context so that I can immediately download the payload that I found with my search instead of having to go back into the right context.\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/112", @@ -40406,6 +41814,7 @@ "node_id": "MDU6SXNzdWUxOTYxOTEzMjM=", "title": "Confusing addition to `help` command", "body": "Why have [zsc -h](https://github.com/zscproject/OWASP-ZSC/blob/master/core/commands.py#L226) in help command inside zsc shell since this command will not be functional inside the shell?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/114", @@ -40434,6 +41843,7 @@ "node_id": "MDU6SXNzdWUxOTYyNTU3NjE=", "title": "Implement continuous integration with travis", "body": "Why Continuous Integration: https://about.gitlab.com/2015/02/03/7-reasons-why-you-should-be-using-ci/\r\n\r\nTravis: https://travis-ci.org/\r\n\r\nThis would require writing tests for various modules and obfuscating scripts.\r\n\r\n@Ali-Razmjoo @jowasp ", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/115", @@ -40464,6 +41874,7 @@ "node_id": "MDU6SXNzdWUxOTc1NjM5MTg=", "title": "After one has choosen obfuscate and then the language there is no way to go back to change language", "body": "", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/117", @@ -40492,6 +41903,7 @@ "node_id": "MDU6SXNzdWUyMjI0MDMyNzg=", "title": "issue in php encoders", "body": "As you see in this [line](https://github.com/zscproject/OWASP-ZSC/blob/master/lib/encoder/php/base64_rev.py#L67) all of `` will be remvoed.\r\n\r\n```\r\ncontent = content.replace('', '')\r\n```\r\nit will work fine if our php script is including just **one** ` section 2 is html \r\n```\r\n\r\nexecute before and after encoding:\r\n\r\n```\r\nC:\\Users\\Zombie\\Downloads\\OWASP-ZSC-master>php 1.php\r\nsection 1 section 2 is html section 3 is php\r\nC:\\Users\\Zombie\\Downloads\\OWASP-ZSC-master>php 1.php\r\n\r\nParse error: syntax error, unexpected '2' (T_LNUMBER) in C:\\Users\\Zombie\\Downloads\\OWASP-ZSC-master\\1.php(12) : eval()'d code on line 1\r\n\r\nC:\\Users\\Zombie\\Downloads\\OWASP-ZSC-master>\r\n```\r\n\r\ndecode generated file: \r\n```\r\n echo \"section 1\"; section 2 is html echo \"section 3 is php\";\r\n```\r\nwhich it must be this to work fine:\r\n```\r\necho \"section 1\"; ?> section 2 is html \r\n```\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/119", @@ -40516,10 +41928,11 @@ "pk": 1429, "fields": { "nest_created_at": "2024-09-11T21:35:06.472Z", - "nest_updated_at": "2024-09-11T21:35:06.472Z", + "nest_updated_at": "2024-09-13T16:16:12.370Z", "node_id": "MDU6SXNzdWUyNTUwNTE5Mjc=", "title": "Update fails", "body": "ZSC failes to update with the following error message:\r\n\r\n```\r\nzsc> update\r\nyour software version: 1.1.0\r\nlast version released: up_url = 'http://zsc.z3r0d4y.com/zsc_archive/'\r\n\r\nredirects to the main page.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/123", @@ -40548,6 +41961,7 @@ 
"node_id": "MDU6SXNzdWUyNTUwNTU2NDI=", "title": "It is impossible to exit/quit/back in shellcode download menu.", "body": "Hey there,\r\n\r\nit is not possible to exit ZSC or go back to the previous menu when you are in the shellcode download menu.\r\n\r\n**Steps to reproduce:**\r\nStart ZSC and go to \"shellcode\" -> \"download\".\r\nTry 'exit', 'quit' and 'back' and you get an error message as seen below:\r\n\r\n```\r\nzsc> shellcode\r\nzsc/shellcode> download\r\nshellcode_id> exit\r\n[!] you must enter a int value\r\nshellcode_id> quit\r\n[!] you must enter a int value\r\nshellcode_id> back\r\n[!] you must enter a int value\r\n```", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/124", @@ -40577,6 +41991,7 @@ "node_id": "MDU6SXNzdWUyODY5NDc5ODg=", "title": "how to obfuscate with this?", "body": "![sin titulo](https://user-images.githubusercontent.com/13740942/34701749-8b27a718-f4af-11e7-9df9-4bee042ca455.png)\r\n\r\n\r\ni write help and not print nothing", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/127", @@ -40605,6 +42020,7 @@ "node_id": "MDU6SXNzdWUyODgwNTM1NjU=", "title": "ZSC made nothing", "body": "hi, whatever i chose i have an error, i cant see shellstorm shellcode because connection error or anything else. i saw there is ton ofbugs after an update, does it affect all the framework?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/128", @@ -40631,6 +42047,7 @@ "node_id": "MDU6SXNzdWUzMDcwMDQwNjU=", "title": "output shellcode file not showing the shellcode after obfuscate method", "body": "Hello,\r\n\r\nwhen I generated a shellcode, in the `.c` file commented assembly code is non-obfuscate assembly code. it needs to be the same of the shellcode.\r\n\r\nRegards.", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/129", @@ -40659,6 +42076,7 @@ "node_id": "MDU6SXNzdWUzNDM0MDE1MDQ=", "title": "compiling", "body": "i am trying to compile this to executable but getting errors below\r\n\r\n||=== Build: Release in QA (compiler: GNU GCC Compiler) ===|\r\nC:\\~\\Documents\\QA\\main.c|1|error: expected identifier or '(' before '=' token|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|191|error: unknown type name 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|207|error: unknown type name 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|211|error: unknown type name 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|319|error: unknown type name 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|320|error: unknown type name 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|331|error: unknown type name 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|332|error: unknown type name 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|412|error: expected ',' or ';' before 'fread'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|413|error: expected ',' or ';' before 'fwrite'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|565|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|568|error: expected declaration specifiers or '...' 
before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|605|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|606|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\stdio.h|609|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|36|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|37|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|38|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|39|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|40|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|46|error: expected ',' or ';' before 'strcspn'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|49|error: expected ',' or ';' before 'strlen'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|50|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|51|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|52|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|55|error: expected ',' or ';' before 'strspn'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|58|error: expected ',' or ';' before 'strxfrm'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|65|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|66|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|72|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|73|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|77|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|80|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|81|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|90|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|91|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|103|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|104|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|107|error: expected declaration specifiers or '...' 
before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|110|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|115|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|131|error: expected ',' or ';' before 'wcscspn'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|133|error: expected ',' or ';' before 'wcslen'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|134|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|135|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|136|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|139|error: expected ',' or ';' before 'wcsspn'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|142|error: expected ',' or ';' before 'wcsxfrm'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|156|error: expected declaration specifiers or '...' before 'size_t'|\r\nc:\\program files (x86)\\codeblocks\\mingw\\include\\string.h|157|error: expected declaration specifiers or '...' before 'size_t'|\r\n||More errors follow but not being shown.|\r\n||Edit the max errors limit in compiler options...|\r\n||=== Build failed: 50 error(s), 0 warning(s) (0 minute(s), 0 second(s)) ===|\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/130", @@ -40687,6 +42105,7 @@ "node_id": "MDU6SXNzdWU4NzcwMTQ3NjY=", "title": "Proof of code obfuscation functionality", "body": "sorry to bother you, but how can we prove the functionality of code before and after code obfuscation?", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/131", @@ -40713,6 +42132,7 @@ "node_id": "I_kwDOAiJ3Uc5Nqrn_", "title": "Can not update", "body": "Hi,\r\nI want to update the OWASP ZSC via its feature 'update'.\r\nBut, an error message appeared: \"Connection Error!\" like the following screenshot.\r\n\r\nWhy is it? 
\r\nWhat should I do?\r\n![Screenshot 2022-07-13 14:37:18](https://user-images.githubusercontent.com/33023197/178678211-a9648d4c-2fb3-408d-b9a0-d01a6ffa8233.png)\r\n\r\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/ZSC/issues/133", @@ -40739,6 +42159,7 @@ "node_id": "MDU6SXNzdWU3NzM2MDg5NQ==", "title": "Add script to provide download links to OWASP projects", "body": "For example:\n- @owbot where can I download ZAP?\n- @owbot download Top10?\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Owbot/issues/2", @@ -40765,6 +42186,7 @@ "node_id": "MDU6SXNzdWU3ODUzNTE5Nw==", "title": "Create Slack channel for OWASP presentations", "body": "Starting with OWASP AppSec EU\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Owbot/issues/3", @@ -40793,6 +42215,7 @@ "node_id": "MDU6SXNzdWU3ODUzNTQ3MQ==", "title": "Create OWBot script to find presentations", "body": "Related to #3 \n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Owbot/issues/4", @@ -40817,10 +42240,11 @@ "pk": 1440, "fields": { "nest_created_at": "2024-09-11T21:35:23.633Z", - "nest_updated_at": "2024-09-11T21:35:23.633Z", + "nest_updated_at": "2024-09-13T16:16:16.889Z", "node_id": "MDU6SXNzdWU4MzE0MjkxNw==", "title": "Move OWbot from Heroku into GCE (Google Compute Engine)", "body": "I.e. here https://console.developers.google.com/project/owasp-tests\n", + "summary": "", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/Owbot/issues/5", @@ -40847,6 +42271,7 @@ "node_id": "MDU6SXNzdWU3MTMzNTk3Ng==", "title": "add example code and docs to demonstrate matching data: and tel: URLs", "body": "```\nThere is no existing documentation on how to allow data: and tel: URLs. Add an \nexample that demonstrates how HtmlPolicyBuilder.allowUrlProcotols can be used \nwith data: and tel:.\n\n```\n\nOriginal issue reported on code.google.com by `mikesamuel@gmail.com` on 21 Jan 2014 at 4:04\n", + "summary": "Add example code and documentation for handling `data:` and `tel:` URLs using `HtmlPolicyBuilder`. \n\n1. Create a new documentation section that outlines the usage of `HtmlPolicyBuilder.allowUrlProtocols`.\n2. Include example code snippets demonstrating the configuration of `HtmlPolicyBuilder` to permit `data:` and `tel:` protocols.\n3. Ensure to explain the security implications of allowing these protocols in HTML content.\n4. Consider adding tests to validate that the policy correctly allows and blocks specified URL protocols.\n5. Review existing documentation for consistency and clarity, and update any outdated references.\n\nStart by creating a new documentation file or updating an existing one, and write the example code that showcases the policy configuration. Then, implement unit tests to confirm the functionality.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/21", @@ -40878,6 +42303,7 @@ "node_id": "MDU6SXNzdWU3MTMzNTk3OA==", "title": "Simplify policies that require constraints on a URL based on its protocol", "body": "```\nOnce a: protocol is allowed, policy authors often want to place additional \nconstraints: e.g. a data protocol with an image/... 
mime-type for use with , or a tel: protocol that contains a valid telephone number.\n\nRight now, policy authors are tempted to do\n\nallowUrlProtocols(\"data\", \"https\", \"http\", \"mailto\")\n\nallowAttributes(\"src\").matching(Pattern.compile(\"^(data:image/(gif|png|jpeg)[,;]\n|http|https|mailto|//)\", Pattern.CASE_INSENSITIVE)\n\nwhich requires duplicative effort.\n\nWe should provide good alternatives to writing regular expressions to match \nURLs as it is error prone.\n\nPerhaps a URL policy that recognizes structure in URLs.\n```\n\nOriginal issue reported on code.google.com by `mikesamuel@gmail.com` on 21 Jan 2014 at 4:09\n", + "summary": "Simplify URL policy constraints based on protocol. \n\n1. Create a unified method for policy authors to specify allowed protocols and additional constraints without duplicating effort.\n2. Introduce a URL policy framework that recognizes URL structures, eliminating the need for complex regular expressions.\n3. Implement a system that allows for specific constraints based on the protocol, such as validating mime-types for data URLs used in `` tags or ensuring valid telephone numbers for `tel:` protocols.\n\n**First Steps:**\n- Define the structure of the new URL policy framework.\n- Identify common use cases for protocol constraints.\n- Develop a prototype that allows policy authors to specify protocols and their corresponding constraints in a more straightforward manner.\n- Test the new framework against existing policies to ensure compatibility and reduce the risk of errors.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/22", @@ -40909,6 +42335,7 @@ "node_id": "MDU6SXNzdWU4NTU1NTA5Mg==", "title": "Allow import of OWASP AntiSamy XML policy files", "body": "To ease migration it would be useful if java-html-sanitizer could import AntiSamy XML policy files.\n", + "summary": "Implement import functionality for OWASP AntiSamy XML policy files in java-html-sanitizer. This enhancement will facilitate easier migration for users transitioning from AntiSamy.\n\n1. Analyze the structure of AntiSamy XML policy files to understand how they define sanitization rules.\n2. Create a parser that reads and interprets the XML format, mapping its elements to the existing java-html-sanitizer rules.\n3. Develop a conversion mechanism that translates AntiSamy rules into equivalent java-html-sanitizer configurations.\n4. Integrate this functionality into the existing codebase and ensure it adheres to the project's coding standards.\n5. Write unit tests to validate that the import process correctly translates AntiSamy policies without loss of functionality or security.\n6. Update documentation to include instructions on how to use the new import feature. \n\nBegin by reviewing existing sanitization rule implementations in java-html-sanitizer and explore how to best represent the AntiSamy rules within that framework.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/38", @@ -40937,6 +42364,7 @@ "node_id": "MDU6SXNzdWUxNTY5MjM0Njk=", "title": "Ability to add new CSS attributes to the CssSchema DEFINITION", "body": "Currently we can only define a whitelist using CssSchema.withProperties method, the properties mentioned in this should be present int the master-set DEFINITIONS mentioned in the class. 
\nIn case while configuring we need to add properties to this master-set we can can't as there is not public API exposed to do the same.\n", + "summary": "Enhance the CssSchema definition to allow adding new CSS attributes. Currently, only a whitelist can be defined using the `CssSchema.withProperties` method, which restricts properties to those in the master-set DEFINITION. \n\nImplement a public API that enables users to modify the master-set DEFINITION dynamically. To tackle this problem, follow these steps:\n\n1. Review the existing implementation of `CssSchema` and identify where the master-set DEFINITION is defined and utilized.\n2. Create a method, e.g., `CssSchema.addProperty(String propertyName)`, that allows users to append new properties to the master-set DEFINITION.\n3. Ensure that the new properties are validated against CSS standards before being added to the master-set.\n4. Update documentation to reflect the new functionality and provide examples of how to use the method.\n5. Write unit tests to verify that the new properties can be added successfully and are retrievable through the existing API. \n\nBy implementing these steps, enable users to extend the CSS schema according to their needs.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/70", @@ -40963,6 +42391,7 @@ "node_id": "MDU6SXNzdWUxNTY5MjUzMzg=", "title": "Support for CSS3 functions", "body": "Support CSS3 functions like translate, rotate are removed from the HTML styling. These assist in the presentation of information to the end user. \n", + "summary": "Implement support for CSS3 functions such as translate and rotate in the HTML styling. Reintroduce these functions to enhance the presentation of information for users. \n\nFirst, review the current implementation to identify where CSS3 functions were removed. Next, analyze the HTML rendering engine to determine how to integrate CSS3 transformations back into the styling process. \n\nConsider the following steps:\n1. Investigate existing CSS parsing logic to understand how styles are applied.\n2. Modify the CSS parser to recognize and process CSS3 transformation functions.\n3. Update the rendering engine to apply these transformations correctly to HTML elements.\n4. Test the changes with various CSS styles to ensure compatibility and performance. \n\nDocument the implementation process and create test cases to validate the functionality of the reintroduced CSS3 features.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/71", @@ -40988,11 +42417,12 @@ "model": "github.issue", "pk": 1446, "fields": { - "nest_created_at": "2024-09-11T21:35:37.000Z", - "nest_updated_at": "2024-09-11T21:35:37.000Z", + "nest_created_at": "2024-09-11T21:35:37Z", + "nest_updated_at": "2024-09-11T21:35:37Z", "node_id": "MDU6SXNzdWUxNjQzODczMDU=", "title": " tag closed unexpectedly", "body": "`
\n \n \n \n \n \n
\n \n \n \n \n \n \n \n \n
1st td
2nd td
\n
`\nA tr is missing before '2nd td'. Chrome browser/webview automatically adds tr before it, but html sanitizer closes table before it, and '2nd td' becomes another td of outer table.\nHtml sanitizer output:\n`
\n
1st td
2nd td
`\n", + "summary": "Identify the issue with the `` tag in the provided HTML snippet where a `` is missing before the '2nd td', causing unexpected behavior during sanitization. \n\n1. Recognize that the inner ``. Modify the HTML structure to ensure all `` tags.\n \n2. Update the original HTML to include the missing ``:\n ```html\n
` for '2nd td' must be enclosed within a corresponding `
` elements are correctly placed within `
\n \n \n \n \n \n
\n \n \n \n \n \n \n \n \n \n
1st td
2nd td
\n
\n ```\n\n3. Test the updated HTML in various browsers to ensure consistent rendering and that no automatic corrections are made by the browser.\n\n4. Review the HTML sanitizer's behavior to confirm it does not alter the structure unexpectedly. Investigate the sanitizing library's documentation for any configuration options that may influence `` handling.\n\n5. Document findings and solutions in the GitHub issue for future reference and ensure similar mistakes are avoided in the future.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/77", @@ -41007,8 +42437,8 @@ "author": 692, "repository": 1155, "assignees": [ - 236, - 689 + 689, + 236 ], "labels": [ 274 @@ -41024,6 +42454,7 @@ "node_id": "MDU6SXNzdWUxNzI0MDg3ODA=", "title": "  escaped to \" \"", "body": "For example, I have the following input:\n`
`\n\nAfter sanitizing, it became the \"test bob\", but when we displaying it into a `

` tag,\nit will be `

test bob

`, this is not what we want as the count of blank is not 8, can someone look at this issue?\n", + "summary": "Investigate the issue with HTML entity escaping of non-breaking spaces (` `). When sanitizing input like ``, ensure that the non-breaking spaces are preserved correctly in the output.\n\n1. Identify where the sanitization process occurs in the code.\n2. Modify the sanitization logic to replace ` ` with a string of spaces instead of a single space. Consider using a character count to match the number of ` ` entities.\n3. Ensure that when rendering the string in a `

` tag, the spaces maintain their intended count. You might need to use CSS or a different HTML element (like `

`) that respects whitespace.\n\nTest the changes with various inputs to confirm that the output retains the correct number of spaces.",
     "state": "open",
     "state_reason": "",
     "url": "https://github.com/OWASP/java-html-sanitizer/issues/87",
@@ -41050,6 +42481,7 @@
     "node_id": "MDU6SXNzdWUxODAwMDQ0MTE=",
     "title": "Duplicate attributes removed without reporting to HtmlChangeListener",
     "body": "Consider the following HTML:\n\n```\nlegal link\n```\n\nI would expect the following code to output _Discarded attribute_:\n\n```\npublic static void main(String[] args) {\n        String sanitized = Sanitizers.LINKS.sanitize(\"legal link\", new HtmlChangeListener() {\n            @Override\n            public void discardedTag(@Nullable Object context, String elementName) {\n                System.out.println(\"Discarded tag\");\n            }\n\n            @Override\n            public void discardedAttributes(@Nullable Object context, String tagName, String... attributeNames) {\n                System.out.println(\"Discarded attribute\");\n            }\n        }, null);\n\n        System.out.println(sanitized);\n    }\n```\n\nThe outputted HTML is as it should:\n\n```\nlegal link\n```\n\nHowever, the sanitation is not reported as an HTML change. This is critical in a case where the sanitizer is used for validation, as the input would be assumed to be valid.\n\nI'd be happy to submit a PR if I could get a few pointers. Do you have any initial thoughts about:\n1. Where and how the fix should be done? General pointers are okay.\n2. Any refactorings that should be done in `HtmlChangeReporterTest`? It doesn't look very ready for another test case right now 😉. \n",
+    "summary": "Address the issue of duplicate attributes being removed in the HTML sanitizer without notifying the `HtmlChangeListener`. \n\n1. Investigate the sanitization process within the `Sanitizers.LINKS` class. Locate the section of the code responsible for attribute handling and determine where the logic for discarding duplicate attributes is implemented. Ensure that the `discardedAttributes` method of the `HtmlChangeListener` is invoked whenever an attribute is discarded.\n\n2. Modify the `HtmlChangeReporterTest` class to incorporate a new test case that specifically checks for discarded attributes. Assess the current structure of the class to identify necessary refactorings for accommodating this additional test case. Ensure the test setup can validate both the output HTML and the reporting of discarded attributes.\n\n3. Implement unit tests to confirm that the changes effectively report discarded attributes when duplicates are removed. \n\n4. Consider potential edge cases, such as different attribute casing or variations in how attributes are specified, and ensure comprehensive test coverage.\n\nBegin by reviewing the sanitization logic and adding debug statements to understand the flow of attribute handling. Then, proceed with the modifications and tests to ensure the desired outcome is achieved.",
     "state": "open",
     "state_reason": "",
     "url": "https://github.com/OWASP/java-html-sanitizer/issues/94",
@@ -41076,6 +42508,7 @@
     "node_id": "MDU6SXNzdWUxODA5NTIyODI=",
     "title": "empty ``s misnest",
     "body": "wynne.jg reports\n\n> Sanitizing\n> \n> ``` html\n> \n> ```\n> \n> yields the following\n> \n> ``` html\n> \n> ```\n\n---\n\nNote the ``s are nesting in the output but are siblings in the input.\n",
+    "summary": "Fix misnesting of `` elements in sanitized HTML. The current sanitization process incorrectly nests `` elements that should be siblings. \n\n1. Review the HTML sanitization logic in the codebase to identify how it processes `` and `` tags.\n2. Implement a check to ensure that `` elements within a `` remain siblings during sanitization, rather than nesting them.\n3. Write unit tests to validate the correct handling of sibling `` elements in various scenarios.\n4. Test with the provided input to ensure expected output matches the input structure without nesting.\n5. Document the changes made to the sanitization process for future reference. \n\nStart by examining the sanitization function that handles `` and `` tags.",
     "state": "open",
     "state_reason": "",
     "url": "https://github.com/OWASP/java-html-sanitizer/issues/96",
@@ -41104,6 +42537,7 @@
     "node_id": "MDU6SXNzdWUxODk4MjMzOTM=",
     "title": "is it possible to allow text in ` not being sanitized at all. Any idea on how to achieve this?",
+    "summary": "Allow text in the ```` content with StylingPolicy then letting add a parent css selector per line item could be a nice feature.\r\nFor example, following style content:\r\n```\r\n\r\n```\r\n\r\ncan be transformed to \r\n\r\n```\r\n\r\n```\r\nThis feature cannot be implemented in the applications since StylingPolicy class is a package class.",
+    "summary": "Implement a feature to sanitize `\r\n```\r\n\r\nAfter removing the text `ZZZ` from the comment the content of the e-mail body is displayed properly. As it is treating the comment as a nested tag inside the style tag and searching for the closing of the nested tag `` it considers it as the closing of the nested tag `` and leaving the other tags unbalanced.\r\n\r\nI have also gone through the code of the [HtmlLexer](https://github.com/OWASP/java-html-sanitizer/blob/main/src/main/java/org/owasp/html/HtmlLexer.java) but not able to figure it out since the parsing is happening based on one character at a time so regex pattern matching will not be possible to handle this issue.\r\n\r\nIt will be great if someone can guide me on how to handle this situation or it can be considered as an enhancement or bugfix.",
+    "summary": "Address the HTML comment sanitization issue in the Zimbra library. Identify the problem where the library fails to properly parse comments containing specific text (e.g., `ZZZ`), leading to incorrect rendering of email body content. \n\n1. **Analyze the parsing logic** in the `HtmlLexer` class to understand how comments are processed and how they interact with nested tags.\n2. **Investigate character-by-character parsing** to find a way to differentiate between actual HTML tags and text within comments.\n3. **Consider implementing a state machine** or a similar mechanism to manage parsing contexts effectively, ensuring that comments do not interfere with tag recognition.\n4. **Create unit tests** that replicate the problematic HTML comment structure to validate the parsing logic once adjustments are made.\n5. **Engage with the community** for feedback on proposed solutions or enhancements, and check if this issue aligns with existing bug reports or feature requests.\n\nBegin by reviewing the `HtmlLexer` codebase, focusing on how comments are handled and the logic surrounding tag detection.",
     "state": "open",
     "state_reason": "",
     "url": "https://github.com/OWASP/java-html-sanitizer/issues/235",
@@ -42436,6 +43919,7 @@
     "node_id": "I_kwDOAhEkFs49IUAv",
     "title": "Overflow is sanitized",
     "body": "Hello, the codingame game engine which allow people to create games for its platform use your sanitizer on the game documentation.\r\nWhen I tried to implement some overflow on tables, it got sanitized. The game engine principal contributor [told me that he allowed everything he could](https://github.com/CodinGame/codingame-game-engine/issues/42#issuecomment-942396105) and that he didn't know your sanitizer would delete overflows.\r\n\r\nIs this possible to allow them ?\r\n\r\nHere is the [part of their code where they sanitize the game statement](https://github.com/CodinGame/codingame-game-engine/blob/master/runner/src/main/java/com/codingame/gameengine/runner/Renderer.java#L900), and here is the part of my html page including the overflow : \r\n```html\r\n             
\r\n\r\n
\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n
Bot classDamage per bulletBullet per shotAim duration (frame)Shot duration (frame)Precision short rangePrecision mid rangePrecision long rangeSpeedHealthShield
Assault30034295%55%15%1.250003000
\r\n \r\n```\r\n", + "summary": "Investigate the issue of overflow sanitization in the CodinGame game engine. Review the relevant code in the sanitation process located at `Renderer.java` (line 900). Confirm if the sanitizer is responsible for removing the `overflow-x:auto;` style from HTML elements.\n\n1. Analyze the sanitizer's current configurations to identify what styles or attributes it allows or disallows.\n2. Modify the sanitizer logic to permit the CSS overflow properties that are essential for table rendering.\n3. Test the changes by running the game engine with the provided HTML snippet to ensure the overflow behavior is preserved.\n4. Collaborate with the CodinGame team to document the update and potential impacts on existing games. \n\nConsider adding unit tests to verify that the overflow property is not stripped in future updates.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/236", @@ -42462,6 +43946,7 @@ "node_id": "I_kwDOAhEkFs492G_B", "title": "Attributes allowed globally together with \"style\" are lost", "body": "Since the commit 020d5d0d7b8e985be32d3608612a9889135ef060 all attributes that are allowed globally are ignored, if \"style\" is given as the first attribute.\r\n\r\nProblematic code:\r\n```\r\n public HtmlPolicyBuilder globally() {\r\n if(attributeNames.get(0).equals(\"style\")) {\r\n return allowStyling();\r\n } else {\r\n return HtmlPolicyBuilder.this.allowAttributesGlobally(\r\n policy, attributeNames);\r\n }\r\n }\r\n```\r\n\r\nProof\r\n ```\r\n @Test\r\n public static final void testStyleWithOtherAttributesGlobally() {\r\n PolicyFactory policyBuilder = new HtmlPolicyBuilder()\r\n .allowAttributes(\"style\", \"align\").globally()\r\n .allowElements(\"a\", \"label\", \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\")\r\n .toFactory();\r\n String input = \"

This is some green text

\";\r\n String want = \"

This is some green text

\";\r\n assertEquals(want, policyBuilder.sanitize(input));\r\n }\r\n```\r\n_Note that `align=\"center\"` is missing from the output._\r\n\r\nI will file a PR to fix the issue", + "summary": "Identify and fix the issue where globally allowed attributes are lost when \"style\" is the first attribute in the `globally()` method. \n\nAnalyze the problematic code and focus on the conditional check within the `globally()` method:\n```java\npublic HtmlPolicyBuilder globally() {\n if(attributeNames.get(0).equals(\"style\")) {\n return allowStyling();\n } else {\n return HtmlPolicyBuilder.this.allowAttributesGlobally(policy, attributeNames);\n }\n}\n```\nNotice that when \"style\" is first, the method bypasses allowing other global attributes, leading to loss of attributes like `align`.\n\nReview the provided test case `testStyleWithOtherAttributesGlobally()` which demonstrates the issue. The expected output contains both `style` and `align`, but the current implementation only retains `style`.\n\nTo tackle the problem, consider modifying the `globally()` method to ensure all globally allowed attributes are processed regardless of the order. \n\nFirst steps:\n1. Modify the `globally()` method to always invoke `allowAttributesGlobally(policy, attributeNames)` after `allowStyling()`.\n2. Update the logic to ensure that it processes all attributes in `attributeNames`, whether \"style\" is first or not.\n3. Run existing tests to verify that the fix does not introduce new issues.\n4. Write additional tests if necessary to cover edge cases where other attributes might be ignored. \n\nPrepare a PR to implement these changes once the solution is verified.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/237", @@ -42488,6 +43973,7 @@ "node_id": "I_kwDOAhEkFs4_iby3", "title": "Comments don't get removed if inside curly brackets", "body": "I noticed that comments are not removed if they are placed inside curly brackets.\r\n\r\n## Example\r\n```\r\nPolicyFactory policy = new HtmlPolicyBuilder().allowElements(\"p\").toFactory();\r\n\r\nString unsanitized = \"

{}

\";\r\nString sanitized = policy.sanitize(unsanitized);\r\nString expected = \"

{}

\";\r\nSystem.out.println(\"Expected: \" + expected + \"\\nActual: \" + sanitized);\r\n\r\n```\r\n\r\nThe above code prints:\r\n```\r\nExpected:

{}

\r\nActual:

{}

\r\n```\r\n\r\nThis also happens, if the brackets and comment are not nested inside a paragraph element (e.g. `{}`). Surprisingly, the comments is removed if there is a whitespace between bracket and comment (e.g. ` {}`). ", + "summary": "Investigate the issue of comments not being removed when placed inside curly brackets in the `HtmlPolicyBuilder` sanitization process. \n\n1. Identify how the `HtmlPolicyBuilder` processes comments within curly brackets. \n2. Analyze the handling of whitespace in relation to comment removal. \n3. Create test cases to replicate the issue, ensuring to include variations of curly brackets and comments, both nested and not nested within HTML elements.\n\nStart by debugging the comment filtering logic in the library. Review the regex or parsing rules utilized for sanitization, specifically where they interact with curly brackets. Ensure to check if there are any conditions that prevent the removal of comments when encapsulated in `{}`.\n\nImplement a fix by modifying the comment removal logic to account for comments within curly brackets. Test the proposed solution against the existing test cases to validate that the expected output matches actual results, particularly ensuring it handles cases with and without whitespace correctly. \n\nFinally, document the changes made and update any related documentation to reflect the new behavior.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/239", @@ -42514,6 +44000,7 @@ "node_id": "I_kwDOAhEkFs5AqRJe", "title": "Issue when using < as a the comparaison string", "body": "Hi,\r\nIs there a way to sanitize something like `If b is 0 and a is 1 then balert(\"Hello\")", "body": "Hi,\r\n\r\nString entryText = \"\"_ for this I am expecting **\"This is test 'Hello'\"**\r\n\r\nIs it possible to achieve this?\r\n", + "summary": "Sanitize HTML input to allow specific tags and attributes while escaping others. \n\n1. Identify the expected behavior: \n - For `String entryText = \"\"`, you expect the output to be `\"This is test 'Hello'\"`.\n\n2. Investigate the current implementation of `HtmlPolicyBuilder` to understand its behavior regarding script tags and text content.\n\n3. Modify the `HtmlPolicyBuilder` configuration:\n - Allow specific text content to remain unescaped while ensuring that script tags are sanitized or stripped out.\n\n4. Implement a custom policy:\n - Create a new policy that explicitly defines which tags to allow and how to handle script tags.\n - Use methods like `.allowElements()` and `.allowAttributes()` to tailor the policy to your needs.\n\n5. Test the implementation:\n - Run tests with various HTML strings to ensure the output matches the expected results.\n\n6. Review the documentation of the library to ensure compliance with security standards, particularly regarding XSS (Cross-Site Scripting) prevention.\n\n7. Finally, document the changes made and the rationale behind the policy configuration for future reference.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/241", @@ -42566,6 +44054,7 @@ "node_id": "I_kwDOAhEkFs5BMIds", "title": "CSS 3 and 4 not supported", "body": "As reported in other issues, CSS 3 and 4 properties are not defined in DEFINITIONS in https://github.com/OWASP/java-html-sanitizer/blob/main/src/main/java/org/owasp/html/CssSchema.java\r\n\r\nis there a reason to map the DEFINITIONS to lower level CSS? 
any plan to add CSS 3/4 support or allow customized definitions.\r\n\r\nThanks,\r\nLaura\r\n", + "summary": "Add support for CSS 3 and 4 properties in the `DEFINITIONS` of `CssSchema.java`. Currently, the lack of these properties limits the functionality of the Java HTML Sanitizer. Investigate the rationale behind mapping to lower-level CSS and consider the implications of introducing support for CSS 3 and 4.\n\n1. Review the existing `DEFINITIONS` in `CssSchema.java` to identify which CSS 3 and 4 properties are missing.\n2. Assess the current mapping strategy to lower-level CSS and determine whether it's necessary or beneficial to maintain.\n3. Propose a plan to include CSS 3 and 4 properties, considering backward compatibility and existing usage.\n4. Explore options for allowing users to define custom CSS properties that can be sanitized, enhancing flexibility.\n5. Create an issue for each major CSS 3 and 4 property that needs to be added, facilitating community contributions.\n\nEngage with the community to gather feedback and prioritize the implementation of desired features.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/242", @@ -42592,6 +44081,7 @@ "node_id": "I_kwDOAhEkFs5BOuYj", "title": "Enhancement: Extend HtmlChangeListener to also include the rejected content/value", "body": "Something like the following would be really nice:\r\n\r\n```\r\npublic interface HtmlChangeListener {\r\n\r\n /** Called when a tag is discarded from the input. */\r\n public void discardedTag(@Nullable T context, String elementName, String rejectedContent);\r\n\r\n /**\r\n * Called when attribute is discarded\r\n * from the input but the containing tag is not.\r\n */\r\n public void discardedAttribute(\r\n @Nullable T context, String tagName, String attributeName, String rejectedValue);\r\n}\r\n```", + "summary": "Enhance the `HtmlChangeListener` interface to include methods for handling rejected content and values. Implement the following methods:\n\n1. `discardedTag(@Nullable T context, String elementName, String rejectedContent)`: Trigger this method when a tag is discarded from the input, passing the context, element name, and the rejected content.\n\n2. `discardedAttribute(@Nullable T context, String tagName, String attributeName, String rejectedValue)`: Trigger this method when an attribute is discarded but the containing tag remains, passing the context, tag name, attribute name, and the rejected value.\n\n**First Steps:**\n- Modify the `HtmlChangeListener` interface to include the new methods.\n- Implement these methods in any existing classes that currently implement `HtmlChangeListener`.\n- Update any existing logic that processes HTML changes to handle the new information about rejected tags and attributes.\n- Write unit tests to ensure that the new methods are called correctly with the appropriate parameters when tags or attributes are rejected.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/243", @@ -42618,6 +44108,7 @@ "node_id": "I_kwDOAhEkFs5BX5L9", "title": "Randomized test failure", "body": "While compiling I tripped a randomized test failure for testUnderStress. Setting the seed replicates the issue. \r\n\r\nOne note, I did this from a Windows 10 machine and to get the lexer test working I had to change the whitespace on the htmllexer resource files to LF from CRLF. 
I note that in case there is something particular to my environment and there is difficulty replicating this issue.\r\n\r\nOutput was :\r\n```junit.framework.ComparisonFailure: not idempotent, seed=1641590962954, css=`.*/<\t\r\n-->õ/*/*R\t,\\0\r\nfoo \t< \r\n  \t\t\\a࠷\r\n\t*/�important.*/U+a-->*/*/style\\a\r\n\"𐐂*/\"]\\\r\n` \r\nExpected :. */ < õ �important. */ U+a- *\r\nActual :. */ < õ �important. */ U+a- - *```\r\n\r\n\r\n", + "summary": "Investigate the randomized test failure in `testUnderStress` encountered on a Windows 10 machine. Replicate the issue by setting the seed to `1641590962954`. Note that changing the whitespace in the `htmllexer` resource files from CRLF to LF was necessary for the lexer test to function.\n\n1. Set up a Windows 10 environment to replicate the issue, ensuring the same conditions as the original test run.\n2. Compile the project and run `testUnderStress` using the specified seed.\n3. Examine the output, focusing on the `ComparisonFailure` details:\n - Confirm the expected and actual outputs:\n - Expected: `:. */ < õ �important. */ U+a- *`\n - Actual: `:. */ < õ �important. */ U+a- - *`\n4. Investigate the source code related to the `testUnderStress` to identify potential causes for the discrepancy in expected vs. actual outputs.\n5. Review the handling of randomization in tests to ensure consistent behavior across environments.\n6. Consider adding logging or debugging statements around the areas where randomness is introduced to trace the execution flow and data state. \n\nProceed with these steps to diagnose and resolve the issue effectively.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/245", @@ -42644,6 +44135,7 @@ "node_id": "I_kwDOAhEkFs5CSTlQ", "title": "Don't Support ", "body": "\r\n * any input that causes the problem : \r\n * The default policy: EbayPolicyExample \r\n * the output you expect: \r\n\r\nThe exmaple Code:\r\n String html = \"\";\r\n\t\tString expected = \"\";\r\n\t\tSanitizedResult result =org.owasp.html.HtmlSanitizer.sanitize(html, hcr.getWrappedPolicy());\r\n\t\tassertEquals(expected, result.getCleanHTML());\r\n\r\nIn my case, the input with CDATA is OK. But, after Sanitizer, the output data is empty. Is it possible to support CDATA? \r\n", + "summary": "Implement support for CDATA in the HTML sanitizer. \n\n1. Identify the issue: The sanitizer currently removes CDATA sections, causing output to be empty when CDATA is included in the input. \n\n2. Review the current sanitizing policy in `EbayPolicyExample` and understand how it processes input strings. \n\n3. Modify the sanitizer logic to recognize and preserve CDATA sections. This may involve:\n - Adding a check for `` tags in the input.\n - Allowing these tags to pass through the sanitizer without being stripped out.\n\n4. Update the test case to confirm that the sanitizer correctly returns the expected output when CDATA is present. \n\n5. Run the tests to ensure that the changes do not break existing functionality. \n\n6. Document the changes and update the policy to include a note on CDATA handling. 
\n\nBy following these steps, ensure that the sanitizer functions as expected with CDATA input.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/246", @@ -42670,6 +44162,7 @@ "node_id": "I_kwDOAhEkFs5CTXiI", "title": "Array out of bounds exception on HtmlPolicyBuilder initialization", "body": "Constructing a HtmlPolicyBuilder zero-defined global attributes, globally, leads to an Arry out of bounds exception\r\n\r\nHere is sample code that produces the problem\r\n\r\n```java\r\nnew HtmlPolicyBuilder().allowElements().allowAttributes().globally().toFactory();\r\n```\r\n\r\n\r\nThe exception comes form an un-guarded check on the zeroth element of the attributesNames list. In this situation, attribute names is empty and so has no zeroth element.\r\n\r\n\r\n```java\r\n public HtmlPolicyBuilder globally() {\r\n if(attributeNames.get(0).equals(\"style\")) {\r\n return allowStyling();\r\n } else {\r\n return HtmlPolicyBuilder.this.allowAttributesGlobally(\r\n policy, attributeNames);\r\n }\r\n }\r\n```\r\n\r\nThis construction used to work in version 20180219.1 of this library but is broken in 20211018.2.", + "summary": "Fix the ArrayIndexOutOfBoundsException in the `HtmlPolicyBuilder` when initializing with zero-defined global attributes. The exception arises because the method `globally()` attempts to access the zeroth element of an empty `attributeNames` list.\n\n### Steps to Address the Issue:\n\n1. **Guard the Access**: Modify the `globally()` method to check if `attributeNames` is empty before accessing its zeroth element. Use a conditional statement to prevent accessing an index that doesn't exist.\n\n ```java\n public HtmlPolicyBuilder globally() {\n if (attributeNames.isEmpty() || !attributeNames.get(0).equals(\"style\")) {\n return HtmlPolicyBuilder.this.allowAttributesGlobally(policy, attributeNames);\n } else {\n return allowStyling();\n }\n }\n ```\n\n2. **Test the Fix**: After implementing the guard clause, create unit tests that cover cases with both empty and populated `attributeNames` to ensure the issue is resolved and does not occur again.\n\n3. **Check Version Compatibility**: Review the changes made between versions 20180219.1 and 20211018.2 of the library to identify any other potential breaking changes.\n\n4. **Document the Change**: Update the documentation to reflect the behavior of the `globally()` method when called with no defined attributes, ensuring users understand the expected input and output.\n\n5. **Release a New Version**: Once the fix is validated through testing, prepare and release a new version of the library containing the fix.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/247", @@ -42696,6 +44189,7 @@ "node_id": "I_kwDOAhEkFs5Cp_kb", "title": "Consider clarifying thread safety javadocs", "body": "## Observation\r\nAfter reading the [javadocs on thread safety for the HTML policy builder](https://github.com/OWASP/java-html-sanitizer/blob/33d319f876abbb35cb95eddf9705c46bd96822bd/src/main/java/org/owasp/html/HtmlPolicyBuilder.java#L144-L154), I am somewhat, but not entirely, confident about what I should be doing.\r\n\r\nVery clear:\r\n> This class is not thread-safe. 
\r\n\r\n2nd paragraph is pretty clear too:\r\n> The resulting policy will not violate its security guarantees as a result of race conditions, but is not thread safe because it maintains state to track whether text inside disallowed elements should be suppressed.\r\n\r\n3d paragraph is maybe not so clear?:\r\n> The resulting policy can be reused, but if you use the {@link HtmlPolicyBuilder#toFactory()} method instead of {@link #build}, then binding policies to output channels is cheap so there's no need.\r\n\r\n## Proposal\r\nMaybe reword the 2nd and 3rd paragraphs to focus on what the user needs to know?\r\n\r\n> The policy returned by {@link #build} is stateful and not thread-safe.\r\n\r\n> For thread-safety, use the policy factory returned by {@link #toFactory()}.\r\n\r\nIf I've got that right? Think I do, but not 100% sure.\r\n\r\n## Watcha think?\r\nMaybe a good idea? Maybe not? Happy to hear back from those in know!", + "summary": "Clarify the thread safety documentation in the HTML Policy Builder javadocs. Address the ambiguity in the second and third paragraphs to enhance user understanding. \n\n1. Emphasize that the policy returned by `#build` is stateful and not thread-safe. \n2. Clearly state that for thread safety, developers should utilize the policy factory obtained via `#toFactory()`.\n\n### Possible First Steps:\n- Review the current javadocs for accuracy and clarity.\n- Rewrite the specified paragraphs to align with the proposed changes.\n- Consider adding examples demonstrating safe usage patterns for both methods.\n- Request input from other contributors or maintainers to ensure the proposed changes meet the project's standards.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/250", @@ -42722,6 +44216,7 @@ "node_id": "I_kwDOAhEkFs5Ei0Es", "title": "Issue for tr td th tbody", "body": "The input is like below:\r\n````md\r\n```html\r\ndata\" or \"data\r\n```\r\n````\r\n , for th and tbody, the case is similiar. The common point is that there is no table tag. \r\nThe output is always append table and tbody, it looks like below:\r\n\r\n````md\r\n```html\r\n
<table><tbody><tr><td>data</td></tr></tbody></table>
\r\n```\r\n````\r\n\r\n\r\nIs it possible to just check whether the tag is legal and don't append other tag?\r\n\r\nFor example, if the input is \r\n````md\r\n```html\r\ndata\r\n```\r\n````\r\n, the expected output is \r\n````md\r\n```html\r\ndata\r\n```\r\n````\r\n but not append table and tbody tag.\r\n", + "summary": "Ensure proper handling of HTML table tags in input data. When processing input such as:\n\n````md\n```html\ndata\n````\nor \n````md\n```html\ndata\n````\n\nCheck for the presence of a `` tag before appending it. If the input does not contain a `
<table>` tag, retain the original structure without adding `<table>
` and `` tags. \n\n### Steps to tackle the problem:\n\n1. **Parse the Input:** Use an HTML parser to read the input string and identify existing table elements.\n \n2. **Check for Table Tags:** Implement a function that checks if the input contains `
`, ``, or `
` tags.\n\n3. **Conditional Appending:** If these tags are absent, return the input as is. If they are present, wrap the content with `` and `` as needed.\n\n4. **Testing:** Create test cases for various inputs to ensure that only valid structures result in tag appending.\n\n5. **Documentation:** Update the documentation to clarify the expected behavior for users regarding input handling.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/252", @@ -42748,6 +44243,7 @@ "node_id": "I_kwDOAhEkFs5E1TgK", "title": "How to validate and throw error on invalid closing tags?", "body": "For an instance,\n```\n this is a para

\n```\nAbove html text results into invalid html text because of tag </p>.\n\n\nBut, if I write html as shown below then it's not giving any error and results into a valid html text.\n```\n

this is para \n```\nExpected result :- we want to validate closing tags same as it is validating opening tags and should return invalid closing tags in result set of string.\n\nPlease help on this.", + "summary": "Validate and throw errors for invalid closing tags in HTML. Implement a function that checks if closing tags match their corresponding opening tags. \n\n1. Parse the HTML string and extract all opening and closing tags.\n2. Create a stack data structure to keep track of opening tags as they are encountered.\n3. For each closing tag, check if it matches the most recent opening tag on the stack:\n - If it matches, pop the opening tag from the stack.\n - If it does not match, record an error for the invalid closing tag.\n4. After processing the string, ensure that all opening tags have corresponding closing tags. If the stack is not empty, report the unmatched opening tags as errors.\n\nFirst steps:\n- Write a function to extract tags from the HTML string using regular expressions.\n- Implement the stack mechanism to validate the tags.\n- Create a mechanism to return a list of errors for any mismatched tags found during validation.", "state": "open", "state_reason": "", "url": "https://github.com/OWASP/java-html-sanitizer/issues/253", @@ -42774,6 +44270,7 @@ "node_id": "I_kwDOAhEkFs5HDudx", "title": "Content after script tag completely deleted?", "body": "Hello there,\r\n\r\nfor some reason, OWASP Java HTML Sanitizer seems just to remove all content after a ```\r\nwould then be replaced by something like\r\n``` <script>alert`1`</script> ```.\r\n\r\nThank you for any answer ;)\r\n\r\np.s. the idea behind my question is that I would like to use a policy that does not know if it deals with a string that will be used as inner html or as an \"ordinary text field\" with no html but where we could read a text about the \"` tags in an HTML document), an attacker can do a successful attack without communicating with any external party. Note, that also the `nonce` for the `] [4] Adding request argument (GET): name \"q\", value \"\">\"\r\n[157893220069.056272] [/?q=\">] [4] Starting phase REQUEST_HEADERS. 
(SecRules 1)\r\n[157893220069.056272] [/?q=\">] [9] This phase consists of 135 rule(s).\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 200000) Executing operator \"Rx\" with param \"(?:application(?:/soap\\+|/)|text/)xml\" against REQUEST_HEADERS:Content-Type.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 200001) Executing operator \"Rx\" with param \"application/json\" against REQUEST_HEADERS:Content-Type.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 900990) Executing unconditional rule...\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:crs_setup_version with value: 330\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901001) Executing operator \"Eq\" with param \"0\" against TX:crs_setup_version.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:crs_setup_version)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901100) Executing operator \"Eq\" with param \"0\" against TX:inbound_anomaly_score_threshold.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:inbound_anomaly_score_threshold)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:inbound_anomaly_score_threshold with value: 5\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901110) Executing operator \"Eq\" with param \"0\" against TX:outbound_anomaly_score_threshold.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:outbound_anomaly_score_threshold)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:outbound_anomaly_score_threshold with value: 4\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] 
[/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901120) Executing operator \"Eq\" with param \"0\" against TX:paranoia_level.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:paranoia_level)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:paranoia_level with value: 1\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901125) Executing operator \"Eq\" with param \"0\" against TX:executing_paranoia_level.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:executing_paranoia_level)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:executing_paranoia_level with value: 1\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901130) Executing operator \"Eq\" with param \"0\" against TX:sampling_percentage.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:sampling_percentage)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:sampling_percentage with value: 100\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901140) Executing operator \"Eq\" with param \"0\" against TX:critical_anomaly_score.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:critical_anomaly_score)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:critical_anomaly_score 
with value: 5\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901141) Executing operator \"Eq\" with param \"0\" against TX:error_anomaly_score.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:error_anomaly_score)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:error_anomaly_score with value: 4\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901142) Executing operator \"Eq\" with param \"0\" against TX:warning_anomaly_score.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:warning_anomaly_score)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:warning_anomaly_score with value: 3\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901143) Executing operator \"Eq\" with param \"0\" against TX:notice_anomaly_score.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:notice_anomaly_score)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:notice_anomaly_score with value: 2\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901150) Executing operator \"Eq\" with param \"0\" against 
TX:do_reput_block.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:do_reput_block)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:do_reput_block with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901152) Executing operator \"Eq\" with param \"0\" against TX:reput_block_duration.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:reput_block_duration)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:reput_block_duration with value: 300\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901160) Executing operator \"Eq\" with param \"0\" against TX:allowed_methods.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:allowed_methods)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:allowed_methods with value: GET HEAD POST OPTIONS\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901162) Executing operator \"Eq\" with param \"0\" against TX:allowed_request_content_type.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:allowed_request_content_type)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:allowed_request_content_type with value: 
application/x-www-form-urlencoded|multipart/form-data|text/xml|application/xml|application/soap+xml|application/x-amf|application/json|application/octet-stream|application/csp-report|application/xss-auditor-report|text/plain\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901168) Executing operator \"Eq\" with param \"0\" against TX:allowed_request_content_type_charset.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:allowed_request_content_type_charset)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:allowed_request_content_type_charset with value: utf-8|iso-8859-1|iso-8859-15|windows-1252\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901163) Executing operator \"Eq\" with param \"0\" against TX:allowed_http_versions.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:allowed_http_versions)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:allowed_http_versions with value: HTTP/1.0 HTTP/1.1 HTTP/2 HTTP/2.0\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901164) Executing operator \"Eq\" with param \"0\" against TX:restricted_extensions.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:restricted_extensions)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:restricted_extensions with value: .asa/ .asax/ .ascx/ .axd/ .backup/ .bak/ .bat/ .cdx/ .cer/ .cfg/ .cmd/ .com/ .config/ .conf/ .cs/ .csproj/ .csr/ .dat/ .db/ .dbf/ .dll/ .dos/ .htr/ .htw/ .ida/ .idc/ .idq/ .inc/ .ini/ .key/ .licx/ .lnk/ .log/ .mdb/ .old/ .pass/ .pdb/ .pol/ .printer/ .pwd/ .rdb/ .resources/ 
.resx/ .sql/ .swp/ .sys/ .vb/ .vbs/ .vbproj/ .vsdisco/ .webinfo/ .xsd/ .xsx/\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901165) Executing operator \"Eq\" with param \"0\" against TX:restricted_headers.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:restricted_headers)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:restricted_headers with value: /proxy/ /lock-token/ /content-range/ /if/\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901166) Executing operator \"Eq\" with param \"0\" against TX:static_extensions.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:static_extensions)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:static_extensions with value: /.jpg/ /.jpeg/ /.png/ /.gif/ /.js/ /.css/ /.ico/ /.svg/ /.webp/\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901167) Executing operator \"Eq\" with param \"0\" against TX:enforce_bodyproc_urlencoded.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:enforce_bodyproc_urlencoded)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:enforce_bodyproc_urlencoded with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: 
pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901200) Executing unconditional rule...\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:anomaly_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:anomaly_score_pl1 with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:anomaly_score_pl2 with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:anomaly_score_pl3 with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:anomaly_score_pl4 with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:sql_injection_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:xss_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:rfi_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:lfi_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:rce_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:php_injection_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:http_violation_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:session_fixation_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:inbound_anomaly_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:outbound_anomaly_score with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:outbound_anomaly_score_pl1 with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:outbound_anomaly_score_pl2 with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:outbound_anomaly_score_pl3 with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) 
action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:outbound_anomaly_score_pl4 with value: 0\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:sql_error_match with value: 0\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901318) Executing operator \"Rx\" with param \"^.*$\" against REQUEST_HEADERS:User-Agent.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:sha1: \"V��ٶ��RՕ��_ׂ)+$�\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:hexEncode: \"56c1a7d9b6b7cf5217d595b3825fd782292b24cc\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"56c1a7d9b6b7cf5217d595b3825fd782292b24cc\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:ua_hash with value: 56c1a7d9b6b7cf5217d595b3825fd782292b24cc\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901321) Executing unconditional rule...\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:real_ip with value: 172.17.0.1\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: initcol\r\n[157893220069.056272] [/?q=\">] [5] Collection `global' initialized with value: global\r\n[157893220069.056272] [/?q=\">] [9] Running action: initcol\r\n[157893220069.056272] [/?q=\">] [5] Collection `ip' initialized with value: 172.17.0.1_56c1a7d9b6b7cf5217d595b3825fd782292b24cc\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901340) Executing operator \"Rx\" with param \"(?:URLENCODED|MULTIPART|XML|JSON)\" against REQBODY_PROCESSOR.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\" (Variable: REQBODY_PROCESSOR)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [9] Saving msg: Enabling body inspection\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: 
nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: paranoia-level/1\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: noauditlog\r\n[157893220069.056272] [/?q=\">] [9] Running action: ctl\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901350) Executing operator \"Eq\" with param \"1\" against TX:enforce_bodyproc_urlencoded.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"0\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:enforce_bodyproc_urlencoded)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901400) Executing operator \"Eq\" with param \"100\" against TX:sampling_percentage.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"100\" (Variable: TX:sampling_percentage)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-SAMPLING\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '901410' due to a SecMarker: END-SAMPLING\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '901420' due to a SecMarker: END-SAMPLING\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '901430' due to a SecMarker: END-SAMPLING\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '901440' due to a SecMarker: END-SAMPLING\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '901450' due to a SecMarker: END-SAMPLING\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-SAMPLING\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-SAMPLING\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 6 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 901500) Executing operator \"Lt\" with param \"1\" Was: \"\" against TX:executing_paranoia_level.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:executing_paranoia_level)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 905100) Executing operator \"StrEq\" with param \"GET /\" against REQUEST_LINE.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"GET /?q=\"> HTTP/1.1\" (Variable: 
REQUEST_LINE)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 905110) Executing operator \"IpMatch\" with param \"127.0.0.1,::1\" against REMOTE_ADDR.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"172.17.0.1\" (Variable: REMOTE_ADDR)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 910011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 910013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910015' due to a SecMarker: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910017' due to a SecMarker: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 911011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 911013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: 
END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '911015' due to a SecMarker: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '911017' due to a SecMarker: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 912100) Executing operator \"Eq\" with param \"0\" against TX:dos_burst_time_slice.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:dos_burst_time_slice)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [4] Executing chained rule.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 0) Executing operator \"Eq\" with param \"0\" against TX:dos_counter_threshold.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:dos_counter_threshold)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [4] Executing chained rule.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 0) Executing operator \"Eq\" with param \"0\" against TX:dos_block_timeout.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:dos_block_timeout)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '912011' due to a SecMarker: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '912120' due to a SecMarker: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '912130' due to a SecMarker: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '912013' due to a 
SecMarker: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '912015' due to a SecMarker: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '912017' due to a SecMarker: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-912-DOS-PROTECTION\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-DOS-PROTECTION-CHECKS\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 8 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 913011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 913013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '913015' due to a SecMarker: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '913017' due to a SecMarker: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920160) Executing operator \"Rx\" with param \"^\\d+$\" against REQUEST_HEADERS:Content-Length.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920470) Executing operator \"Rx\" with param \"^[\\w/.+-]+(?:\\s?;\\s?(?:boundary|charset)\\s?=\\s?['\\\"\\w.()+,/:=?-]+)?$\" against 
REQUEST_HEADERS:Content-Type.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920480) Executing operator \"Rx\" with param \"charset\\s*=\\s*[\\\"']?([^;\\\"'\\s]+)\" against REQUEST_HEADERS:Content-Type.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920430) Executing operator \"Within\" with param \"HTTP/1.0 HTTP/1.1 HTTP/2 HTTP/2.0\" Was: \"\" against REQUEST_PROTOCOL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"HTTP/1.1\" (Variable: REQUEST_PROTOCOL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920015' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920490' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920017' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 4 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921160) Executing operator \"Rx\" with param \"[\\n\\r]+(?:\\s|location|refresh|(?:set-)?cookie|(?:x-)?(?:forwarded-(?:for|host|server)|host|via|remote-ip|remote-addr|originating-IP))\\s*:\" against ARGS_GET_NAMES|ARGS_GET.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_GET_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: 
\"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS_GET:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921190) Executing operator \"Rx\" with param \"[\\n\\r]\" against REQUEST_FILENAME.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/\" (Variable: REQUEST_FILENAME)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '921015' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '921017' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 930011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 930013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: 
pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '930015' due to a SecMarker: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '930017' due to a SecMarker: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 931011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 931013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '931015' due to a SecMarker: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '931017' due to a SecMarker: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running 
action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '932015' due to a SecMarker: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '932017' due to a SecMarker: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '933015' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '933017' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 941011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 941013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars 
updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-941-APPLICATION-ATTACK-XSS\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '941015' due to a SecMarker: END-REQUEST-941-APPLICATION-ATTACK-XSS\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '941017' due to a SecMarker: END-REQUEST-941-APPLICATION-ATTACK-XSS\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-941-APPLICATION-ATTACK-XSS\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-941-APPLICATION-ATTACK-XSS\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 942011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 942013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-942-APPLICATION-ATTACK-SQLI\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '942015' due to a SecMarker: END-REQUEST-942-APPLICATION-ATTACK-SQLI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '942017' due to a SecMarker: END-REQUEST-942-APPLICATION-ATTACK-SQLI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-942-APPLICATION-ATTACK-SQLI\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-942-APPLICATION-ATTACK-SQLI\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 943011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] 
[9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 943013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '943015' due to a SecMarker: END-REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '943017' due to a SecMarker: END-REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 949011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 949013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-949-BLOCKING-EVALUATION\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '949015' due to a SecMarker: END-REQUEST-949-BLOCKING-EVALUATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '949017' due to a SecMarker: END-REQUEST-949-BLOCKING-EVALUATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-949-BLOCKING-EVALUATION\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-949-BLOCKING-EVALUATION\r\n[157893220069.056272] [/?q=\">] [4] Out of a 
SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 980011) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 980013) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-RESPONSE-980-CORRELATION\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '980015' due to a SecMarker: END-RESPONSE-980-CORRELATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '980017' due to a SecMarker: END-RESPONSE-980-CORRELATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-RESPONSE-980-CORRELATION\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-RESPONSE-980-CORRELATION\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] Starting phase REQUEST_BODY. 
(SecRules 2)\r\n[157893220069.056272] [/?q=\">] [9] This phase consists of 285 rule(s).\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 200002) Executing operator \"Eq\" with param \"0\" against REQBODY_ERROR.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: REQBODY_ERROR)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 200003) Executing operator \"Eq\" with param \"0\" against MULTIPART_STRICT_ERROR.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\" (Variable: MULTIPART_STRICT_ERROR)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 200004) Executing operator \"Eq\" with param \"1\" against MULTIPART_UNMATCHED_BOUNDARY.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\" (Variable: MULTIPART_UNMATCHED_BOUNDARY)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 200005) Executing operator \"StrEq\" with param \"0\" against TX:regex(^MSC_).\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 910012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 910000) Executing operator \"Eq\" with param \"1\" against TX:DO_REPUT_BLOCK.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:DO_REPUT_BLOCK)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 910100) Executing operator \"Rx\" with param \"^$\" against TX:HIGH_RISK_COUNTRY_CODES.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 910120) Executing operator \"Eq\" with param \"1\" against IP:PREVIOUS_RBL_CHECK.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 910130) Executing operator \"Eq\" with param \"0\" against TX:block_suspicious_ip.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:block_suspicious_ip)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [4] Executing chained rule.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 0) Executing operator \"Eq\" with param \"0\" against TX:block_harvester_ip.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:block_harvester_ip)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [4] Executing chained rule.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 0) Executing operator \"Eq\" with param \"0\" against TX:block_spammer_ip.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:block_spammer_ip)\r\n[157893220069.056272] 
[/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [4] Executing chained rule.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 0) Executing operator \"Eq\" with param \"0\" against TX:block_search_ip.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:block_search_ip)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: paranoia-level/1\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910140' due to a SecMarker: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910150' due to a SecMarker: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910160' due to a SecMarker: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910170' due to a SecMarker: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910180' due to a SecMarker: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910190' due to a SecMarker: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-RBL-LOOKUP\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-RBL-CHECK\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 8 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 910014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule 
returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910016' due to a SecMarker: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '910018' due to a SecMarker: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-910-IP-REPUTATION\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 911012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 911100) Executing operator \"Within\" with param \"GET HEAD POST OPTIONS\" Was: \"\" against REQUEST_METHOD.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"GET\" (Variable: REQUEST_METHOD)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 911014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '911016' due to a SecMarker: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '911018' due to a SecMarker: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-911-METHOD-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 912012) Executing operator \"Lt\" with 
param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 912014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-912-DOS-PROTECTION\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '912016' due to a SecMarker: END-REQUEST-912-DOS-PROTECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '912018' due to a SecMarker: END-REQUEST-912-DOS-PROTECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-912-DOS-PROTECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-912-DOS-PROTECTION\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 913012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 913100) Executing operator \"PmFromFile\" with param \"scanners-user-agents.data\" against REQUEST_HEADERS:User-Agent.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 913110) Executing operator \"PmFromFile\" with param \"scanners-headers.data\" against REQUEST_HEADERS_NAMES|REQUEST_HEADERS.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"host\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"host\" (Variable: REQUEST_HEADERS_NAMES:Host)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"user-agent\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"user-agent\" (Variable: REQUEST_HEADERS_NAMES:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"accept\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"accept\" (Variable: REQUEST_HEADERS_NAMES:Accept)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"localhost:8080\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [9] T (0) 
t:lowercase: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"*/*\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 913120) Executing operator \"PmFromFile\" with param \"scanners-urls.data\" against REQUEST_FILENAME|ARGS.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/\" (Variable: REQUEST_FILENAME)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 913014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '913101' due to a SecMarker: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '913102' due to a SecMarker: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '913016' due to a SecMarker: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '913018' due to a SecMarker: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-913-SCANNER-DETECTION\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 5 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920100) Executing operator \"Rx\" with param \"^(?i:(?:[a-z]{3,10}\\s+(?:\\w{3,7}?://[\\w\\-\\./]*(?::\\d+)?)?/[^?#]*(?:\\?[^#\\s]*)?(?:#[\\S]*)?|connect (?:\\d{1,3}\\.){3}\\d{1,3}\\.?(?::\\d+)?|options \\*)\\s+[\\w\\./]+|get /[^?#]*(?:\\?[^#\\s]*)?(?:#[\\S]*)?)$\" against 
REQUEST_LINE.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"GET /?q=\"> HTTP/1.1\" (Variable: REQUEST_LINE)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920120) Executing operator \"Rx\" with param \"(?] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920170) Executing operator \"Rx\" with param \"^(?:GET|HEAD)$\" against REQUEST_METHOD.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"GET\" (Variable: REQUEST_METHOD)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [9] This rule severity is: 2 current transaction is: 255\r\n[157893220069.056272] [/?q=\">] [9] Saving msg: GET or HEAD Request with Body Content.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [4] Executing chained rule.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 0) Executing operator \"Rx\" with param \"^0?$\" against REQUEST_HEADERS:Content-Length.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920171) Executing operator \"Rx\" with param \"^(?:GET|HEAD)$\" against REQUEST_METHOD.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"GET\" (Variable: REQUEST_METHOD)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [9] This rule severity is: 2 current transaction is: 2\r\n[157893220069.056272] [/?q=\">] [9] Saving msg: GET or HEAD Request with Transfer-Encoding.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [4] Executing chained rule.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 0) Executing operator \"Eq\" with param \"0\" against REQUEST_HEADERS:Transfer-Encoding.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: REQUEST_HEADERS:Transfer-Encoding)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920180) Executing operator \"Rx\" with param \"^POST$\" against REQUEST_METHOD.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"GET\" (Variable: REQUEST_METHOD)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920190) Executing operator \"Rx\" with param \"(\\d+)-(\\d+)\\,\" against REQUEST_HEADERS:Range|REQUEST_HEADERS:Request-Range.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920210) Executing operator \"Rx\" with param \"\\b(?:keep-alive|close),\\s?(?:keep-alive|close)\\b\" against REQUEST_HEADERS:Connection.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920220) Executing operator \"Rx\" with param \"\\x25\" against REQUEST_URI.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/?q=\">\" (Variable: REQUEST_URI)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920240) Executing operator \"Rx\" with param 
\"^(?i)application/x-www-form-urlencoded\" against REQUEST_HEADERS:Content-Type.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920250) Executing operator \"Eq\" with param \"1\" against TX:CRS_VALIDATE_UTF8_ENCODING.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920260) Executing operator \"Rx\" with param \"\\%u[fF]{2}[0-9a-fA-F]{2}\" against REQUEST_URI|REQUEST_BODY.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/?q=\">\" (Variable: REQUEST_URI)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\" (Variable: REQUEST_BODY)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920270) Executing operator \"ValidateByteRange\" with param \"1-255\" against REQUEST_URI|REQUEST_HEADERS|ARGS|ARGS_NAMES.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"/?q=\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/?q=\">\" (Variable: REQUEST_URI)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"localhost:8080\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"*/*\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920280) Executing operator \"Eq\" with param \"0\" against REQUEST_HEADERS:Host.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920290) Executing operator \"Rx\" with param \"^$\" against REQUEST_HEADERS:Host.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920310) Executing operator \"Rx\" with param \"^$\" against REQUEST_HEADERS:Accept.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920311) Executing operator \"Rx\" with param \"^$\" against REQUEST_HEADERS:Accept.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars 
cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920330) Executing operator \"Rx\" with param \"^$\" against REQUEST_HEADERS:User-Agent.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920340) Executing operator \"Rx\" with param \"^0$\" against REQUEST_HEADERS:Content-Length.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920350) Executing operator \"Rx\" with param \"^[\\d.:]+$\" against REQUEST_HEADERS:Host.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920380) Executing operator \"Eq\" with param \"1\" against TX:MAX_NUM_ARGS.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:MAX_NUM_ARGS)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920360) Executing operator \"Eq\" with param \"1\" against TX:ARG_NAME_LENGTH.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:ARG_NAME_LENGTH)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920370) Executing operator \"Eq\" with param \"1\" against TX:ARG_LENGTH.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:ARG_LENGTH)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920390) Executing operator \"Eq\" with param \"1\" against TX:TOTAL_ARG_LENGTH.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:TOTAL_ARG_LENGTH)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920400) Executing operator \"Eq\" with param \"1\" against TX:MAX_FILE_SIZE.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:MAX_FILE_SIZE)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920410) Executing operator \"Eq\" with param \"1\" against TX:COMBINED_FILE_SIZES.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"0\" (Variable: TX:COMBINED_FILE_SIZES)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920420) Executing operator \"Rx\" with param \"^[^;\\s]+\" against REQUEST_HEADERS:Content-Type.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920440) Executing operator \"Rx\" with param \"\\.([^.]+)$\" against REQUEST_BASENAME.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\" (Variable: REQUEST_BASENAME)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars 
cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920500) Executing operator \"Rx\" with param \"\\.[^.~]+~(?:/.*|)$\" against REQUEST_FILENAME.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/\" (Variable: REQUEST_FILENAME)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920450) Executing operator \"Rx\" with param \"^.*$\" against REQUEST_HEADERS_NAMES.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"host\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"host\" (Variable: REQUEST_HEADERS_NAMES:Host)\r\n[157893220069.056272] [/?q=\">] [7] Added regex subexpression TX.0: host\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:header_name_host with value: /host/\r\n[157893220069.056272] [/?q=\">] [9] This rule severity is: 2 current transaction is: 2\r\n[157893220069.056272] [/?q=\">] [9] Saving msg: HTTP header is restricted by policy (host)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"user-agent\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"user-agent\" (Variable: REQUEST_HEADERS_NAMES:User-Agent)\r\n[157893220069.056272] [/?q=\">] [7] Added regex subexpression TX.0: user-agent\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:header_name_user-agent with value: /user-agent/\r\n[157893220069.056272] [/?q=\">] [9] This rule severity is: 2 current transaction is: 2\r\n[157893220069.056272] [/?q=\">] [9] Saving msg: HTTP header is restricted by policy (user-agent)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"accept\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"accept\" (Variable: REQUEST_HEADERS_NAMES:Accept)\r\n[157893220069.056272] [/?q=\">] [7] Added regex subexpression TX.0: accept\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:header_name_accept with value: /accept/\r\n[157893220069.056272] [/?q=\">] [9] This rule severity is: 2 current transaction is: 2\r\n[157893220069.056272] [/?q=\">] [9] Saving msg: HTTP header is restricted by policy (accept)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [4] Executing chained rule.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 0) Executing operator \"Within\" with param \"/proxy/ /lock-token/ /content-range/ /if/\" Was: \"\" against TX:regex(^HEADER_NAME_).\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 920014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] 
[/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920200' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920201' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920230' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920300' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920271' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920320' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920121' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920341' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920016' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920272' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920018' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920202' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920273' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920274' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920275' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '920460' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-920-PROTOCOL-ENFORCEMENT\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 17 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] 
[/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921110) Executing operator \"Rx\" with param \"[\\n\\r]+(?:get|post|head|options|connect|put|delete|trace|track|patch|propfind|propatch|mkcol|copy|move|lock|unlock)\\s+[^\\s]+(?:\\s+http|[\\r\\n])\" against ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921120) Executing operator \"Rx\" with param \"[\\r\\n]\\W*?(?:content-(?:type|length)|set-cookie|location):\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921130) Executing operator \"Rx\" with param \"(?:\\bhttp\\/(?:0\\.9|1\\.[01])|<(?:html|meta)\\b)\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921140) Executing operator \"Rx\" with param \"[\\n\\r]\" against REQUEST_HEADERS_NAMES|REQUEST_HEADERS.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"Host\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"Host\" (Variable: REQUEST_HEADERS_NAMES:Host)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"User-Agent\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"User-Agent\" (Variable: REQUEST_HEADERS_NAMES:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"Accept\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"Accept\" (Variable: 
REQUEST_HEADERS_NAMES:Accept)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"localhost:8080\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"*/*\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921150) Executing operator \"Rx\" with param \"[\\n\\r]\" against ARGS_NAMES.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 921014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '921151' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '921016' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '921170' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '921180' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '921018' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-921-PROTOCOL-ATTACK\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 6 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 930012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars 
cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 930100) Executing operator \"Rx\" with param \"(?i)(?:\\x5c|(?:%(?:c(?:0%(?:[2aq]f|5c|9v)|1%(?:[19p]c|8s|af))|2(?:5(?:c(?:0%25af|1%259c)|2f|5c)|%46|f)|(?:(?:f(?:8%8)?0%8|e)0%80%a|bg%q)f|%3(?:2(?:%(?:%6|4)6|F)|5%%63)|u(?:221[56]|002f|EFC8|F025)|1u|5c)|0x(?:2f|5c)|\\/))(?:%(?:(?:f(?:(?:c%80|8)%8)?0%8|e)0%80%ae|2(?:(?:5(?:c0%25a|2))?e|%45)|u(?:(?:002|ff0)e|2024)|%32(?:%(?:%6|4)5|E)|c0(?:%[256aef]e|\\.))|\\.(?:%0[01]|\\?)?|\\?\\.?|0x2e){2}(?:\\x5c|(?:%(?:c(?:0%(?:[2aq]f|5c|9v)|1%(?:[19p]c|8s|af))|2(?:5(?:c(?:0%25af|1%259c)|2f|5c)|%46|f)|(?:(?:f(?:8%8)?0%8|e)0%80%a|bg%q)f|%3(?:2(?:%(?:%6|4)6|F)|5%%63)|u(?:221[56]|002f|EFC8|F025)|1u|5c)|0x(?:2f|5c)|\\/))\" against REQUEST_URI_RAW|REQUEST_BODY|REQUEST_HEADERS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/?q=\">\" (Variable: REQUEST_URI_RAW)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\" (Variable: REQUEST_BODY)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 930110) Executing operator \"Rx\" with param \"(?:^|[\\\\/])\\.\\.(?:[\\\\/]|$)\" against REQUEST_URI|REQUEST_BODY|REQUEST_HEADERS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"/?q=\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"/?q=\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"/?q=\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (1) t:cmdLine: \"/?q=>\"\r\n[157893220069.056272] [/?q=\">] [9] multiMatch is enabled. 2 values to be tested.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/?q=\">\" (Variable: REQUEST_URI)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/?q=>\" (Variable: REQUEST_URI)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"\"\r\n[157893220069.056272] [/?q=\">] [9] multiMatch is enabled. 1 values to be tested.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\" (Variable: REQUEST_BODY)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"localhost:8080\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"localhost:8080\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"localhost:8080\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"localhost:8080\"\r\n[157893220069.056272] [/?q=\">] [9] multiMatch is enabled. 1 values to be tested.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] multiMatch is enabled. 
1 values to be tested.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"*/*\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"*/*\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"*/*\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"*/*\"\r\n[157893220069.056272] [/?q=\">] [9] multiMatch is enabled. 1 values to be tested.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 930120) Executing operator \"PmFromFile\" with param \"lfi-os-files.data\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalizePathWin: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalizePathWin: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 930130) Executing operator \"PmFromFile\" with param \"restricted-files.data\" against REQUEST_FILENAME.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalizePathWin: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/\" (Variable: REQUEST_FILENAME)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 930014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '930016' due to a SecMarker: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '930018' due to a 
SecMarker: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-930-APPLICATION-ATTACK-LFI\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 3 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 931012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 931100) Executing operator \"Rx\" with param \"^(?i:file|ftps?|https?):\\/\\/(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\" against ARGS.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 931110) Executing operator \"Rx\" with param \"(?i)(?:\\binclude\\s*\\([^)]*|mosConfig_absolute_path|_CONF\\[path\\]|_SERVER\\[DOCUMENT_ROOT\\]|GALLERY_BASEDIR|path\\[docroot\\]|appserv_root|config\\[root_dir\\])=(?:file|ftps?|https?):\\/\\/\" against QUERY_STRING|REQUEST_BODY.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q=\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q=\">\" (Variable: QUERY_STRING)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\" (Variable: REQUEST_BODY)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 931120) Executing operator \"Rx\" with param \"^(?i:file|ftps?|https?).*?\\?+$\" against ARGS.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 931014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '931130' due to a SecMarker: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '931016' due to a SecMarker: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id 
'931018' due to a SecMarker: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-931-APPLICATION-ATTACK-RFI\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 4 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932100) Executing operator \"Rx\" with param \"(?:;|\\{|\\||\\|\\||&|&&|\\n|\\r|\\$\\(|\\$\\(\\(|`|\\${|<\\(|>\\(|\\(\\s*\\))\\s*(?:{|\\s*\\(\\s*|\\w+=(?:[^\\s]*|\\$.*|\\$.*|<.*|>.*|\\'.*\\'|\\\".*\\\")\\s+|!\\s*|\\$)*\\s*(?:'|\\\")*(?:[\\?\\*\\[\\]\\(\\)\\-\\|+\\w'\\\"\\./\\\\\\\\]+/)?[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*(?:w[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*-[\\\\\\\\'\\\"]*(?:d[\\\\\\\\'\\\"]*(?:o[\\\\\\\\'\\\"]*w[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*d|u[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*p)|r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*q[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t|m[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*r)|s(?:[\\\\\\\\'\\\"]*(?:b[\\\\\\\\'\\\"]*_[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*e|c[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*u|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*d|p[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*i|u[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*b|-[\\\\\\\\'\\\"]*F|h[\\\\\\\\'\\\"]*w|o[\\\\\\\\'\\\"]*f))?|z[\\\\\\\\'\\\"]*(?:(?:[ef][\\\\\\\\'\\\"]*)?g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|c[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*t|m[\\\\\\\\'\\\"]*p)|m[\\\\\\\\'\\\"]*(?:o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|a)|d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s)|e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*(?:(?:f[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l|p[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*p)[\\\\\\\\'\\\"]*e|e[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o|(?:\\s|<|>).*)|a[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*g(?:[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n)?|c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*m|(?:\\s|<|>).*)|o[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*e|l)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|g[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*e)|d[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*g|d[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|f[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*p(?:[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t)?|(?:[np]|y[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*x)[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|b[\\\\\\\\'\\\"]*(?:z[\\\\\\\\'\\\"]*(?:(?:[ef][\\\\\\\\'\\\"]*)?g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t|i[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*2)|s[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t|i[\\\\\\\\'\\\"]*f[\\\\\\
\\'\\\"]*f|t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*r)|a[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*(?:\\s|<|>).*|s[\\\\\\\\'\\\"]*h)|r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*k[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*w|u[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n)|c[\\\\\\\\'\\\"]*(?:o[\\\\\\\\'\\\"]*(?:m[\\\\\\\\'\\\"]*(?:p[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|m[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*d)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|p[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*c)|h[\\\\\\\\'\\\"]*(?:d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*(?:\\s|<|>).*|f[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*s|a[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*r|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*d)|r[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*b|(?:[cp]|a[\\\\\\\\'\\\"]*t)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|u[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*l|s[\\\\\\\\'\\\"]*h)|f[\\\\\\\\'\\\"]*(?:i(?:[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t|(?:\\s|<|>).*)|n[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*(?:\\s|<|>).*))?|t[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*(?:s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*s|w[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o|(?:\\s|<|>).*)|u[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n|(?:e[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*h|c)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*h|g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p)|e[\\\\\\\\'\\\"]*(?:n[\\\\\\\\'\\\"]*(?:v(?:[\\\\\\\\'\\\"]*-[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e)?|d[\\\\\\\\'\\\"]*(?:i[\\\\\\\\'\\\"]*f|s[\\\\\\\\'\\\"]*w))|x[\\\\\\\\'\\\"]*(?:p[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*d|o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*t|r)|e[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|c[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*(?:\\s|<|>).*|g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|s[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c|v[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*l)|h[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*(?:d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t|p[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*w[\\\\\\\\'\\\"]*d)|o[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*(?:n[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*e|i[\\\\\\\\'\\\"]*d)|(?:e[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*d|u[\\\\\\\\'\\\"]*p)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|i[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*y)|i[\\\\\\\\'\\\"]*(?:p[\\\\\\\\'\\\"]*(?:(?:6[\\\\\\\\'\\\"]*)?t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*b[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s|c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*g)|r[\\\\\\\\'\\\"]*b(?:[\\\\\\\\'\\\"]*(?:1(?:[\\\\\\\\'\\\"]*[89])?|2[\\\\\\\\'\\\"]*[012]))?|f[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*g|d[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|g[\\\\\\\\'\\\"]*(?:(?:e[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*l|r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|c[\\\\\\\\'\\\"]*c|i[\\\\\\\\'\\\"]*t)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|z[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*a[\\\\\\\
\'\\\"]*t|i[\\\\\\\\'\\\"]*p)|u[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*z[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*p|d[\\\\\\\\'\\\"]*b)|a[\\\\\\\\'\\\"]*(?:(?:l[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*s|w[\\\\\\\\'\\\"]*k)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|d[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r|p[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*-[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t|r[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*(?:\\s|<|>).*|p))|d[\\\\\\\\'\\\"]*(?:h[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*t|(?:i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|u)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|(?:m[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s|p[\\\\\\\\'\\\"]*k)[\\\\\\\\'\\\"]*g|o[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*s|n[\\\\\\\\'\\\"]*e)|a[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*h)|m[\\\\\\\\'\\\"]*(?:(?:k[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*r|o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|a[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*(?:x[\\\\\\\\'\\\"]*(?:\\s|<|>).*|q)|l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e)|j[\\\\\\\\'\\\"]*(?:(?:a[\\\\\\\\'\\\"]*v[\\\\\\\\'\\\"]*a|o[\\\\\\\\'\\\"]*b[\\\\\\\\'\\\"]*s)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|e[\\\\\\\\'\\\"]*x[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*c)|k[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*l|(?:\\s|<|>).*)|(?:G[\\\\\\\\'\\\"]*E[\\\\\\\\'\\\"]*T[\\\\\\\\'\\\"]*(?:\\s|<|>)|\\.\\s).*|7[\\\\\\\\'\\\"]*z(?:[\\\\\\\\'\\\"]*[ar])?)\\b\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932105) Executing operator \"Rx\" with param 
\"(?:;|\\{|\\||\\|\\||&|&&|\\n|\\r|\\$\\(|\\$\\(\\(|`|\\${|<\\(|>\\(|\\(\\s*\\))\\s*(?:{|\\s*\\(\\s*|\\w+=(?:[^\\s]*|\\$.*|\\$.*|<.*|>.*|\\'.*\\'|\\\".*\\\")\\s+|!\\s*|\\$)*\\s*(?:'|\\\")*(?:[\\?\\*\\[\\]\\(\\)\\-\\|+\\w'\\\"\\./\\\\\\\\]+/)?[\\\\\\\\'\\\"]*(?:s[\\\\\\\\'\\\"]*(?:e[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*(?:(?:f[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*)?(?:\\s|<|>).*|e[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*v|s[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*d)|n[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l|d[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|h[\\\\\\\\'\\\"]*(?:\\.[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*b|u[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*w[\\\\\\\\'\\\"]*n|(?:\\s|<|>).*)|o[\\\\\\\\'\\\"]*(?:(?:u[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e|r[\\\\\\\\'\\\"]*t)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t)|c[\\\\\\\\'\\\"]*(?:h[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*d|p[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|t[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*s|(?:l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*e|f[\\\\\\\\'\\\"]*t)[\\\\\\\\'\\\"]*p|y[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*l|u[\\\\\\\\'\\\"]*(?:(?:\\s|<|>).*|d[\\\\\\\\'\\\"]*o)|d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|s[\\\\\\\\'\\\"]*h|v[\\\\\\\\'\\\"]*n)|p[\\\\\\\\'\\\"]*(?:k[\\\\\\\\'\\\"]*(?:g(?:(?:[\\\\\\\\'\\\"]*_)?[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*o)?|e[\\\\\\\\'\\\"]*x[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*c|i[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*l)|t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*r(?:[\\\\\\\\'\\\"]*(?:d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p))?|a[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*(?:\\s|<|>).*|s[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*w[\\\\\\\\'\\\"]*d)|r[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*(?:e[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*v|f[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|y[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n(?:[\\\\\\\\'\\\"]*(?:3(?:[\\\\\\\\'\\\"]*m)?|2))?|e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*(?:l(?:[\\\\\\\\'\\\"]*(?:s[\\\\\\\\'\\\"]*h|5))?|m[\\\\\\\\'\\\"]*s)|(?:g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|f[\\\\\\\\'\\\"]*t)[\\\\\\\\'\\\"]*p|(?:u[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*h|o[\\\\\\\\'\\\"]*p)[\\\\\\\\'\\\"]*d|h[\\\\\\\\'\\\"]*p(?:[\\\\\\\\'\\\"]*[57])?|i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*g|s[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|n[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*(?:\\.[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*l|o[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*b[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*d)|(?:\\s|<|>).*|a[\\\\\\\\'\\\"]*t)|e[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*(?:k[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*-[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*p|(?:s[\\\\\\\\'\\\"]*t|c)[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t|(?:\\s|<|>).*)|s[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*k[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*p|t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t)|(?:a[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*o|i[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|(?:o[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*u|m[\\\\\\\\'\\\"]*a)[\\\\\\\\'\\\"]*p|p[\\\\\\\
\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*g)|r[\\\\\\\\'\\\"]*(?:e[\\\\\\\\'\\\"]*(?:(?:p[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e|e[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t)|n[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*e)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|a[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*h)|m[\\\\\\\\'\\\"]*(?:(?:d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*)?(?:\\s|<|>).*|u[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r)|u[\\\\\\\\'\\\"]*b[\\\\\\\\'\\\"]*y(?:[\\\\\\\\'\\\"]*(?:1(?:[\\\\\\\\'\\\"]*[89])?|2[\\\\\\\\'\\\"]*[012]))?|(?:a[\\\\\\\\'\\\"]*r|c[\\\\\\\\'\\\"]*p|p[\\\\\\\\'\\\"]*m)[\\\\\\\\'\\\"]*(?:\\s|<|>).*|n[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*o|o[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e|s[\\\\\\\\'\\\"]*y[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*c)|t[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*(?:p[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e|i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*g)|s[\\\\\\\\'\\\"]*h)|r[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e(?:[\\\\\\\\'\\\"]*6)?|e[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t|e[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|i[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*(?:o[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*t|(?:\\s|<|>).*)|a[\\\\\\\\'\\\"]*(?:i[\\\\\\\\'\\\"]*l(?:[\\\\\\\\'\\\"]*f)?|r[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|o[\\\\\\\\'\\\"]*(?:u[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*(?:\\s|<|>).*|p))|u[\\\\\\\\'\\\"]*(?:n[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*(?:i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*k[\\\\\\\\'\\\"]*(?:\\s|<|>).*|z[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*a)|c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|a[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*e|r[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*r|s[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t|z[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*p|x[\\\\\\\\'\\\"]*z)|s[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*(?:(?:a[\\\\\\\\'\\\"]*d|m[\\\\\\\\'\\\"]*o)[\\\\\\\\'\\\"]*d|d[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*l)|l[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|m[\\\\\\\\'\\\"]*(?:y[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*q[\\\\\\\\'\\\"]*l(?:[\\\\\\\\'\\\"]*(?:d[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*p(?:[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*w)?|h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*y|a[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n|s[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*w))?|(?:(?:o[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*n|u[\\\\\\\\'\\\"]*t)[\\\\\\\\'\\\"]*t|v)[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|x[\\\\\\\\'\\\"]*(?:z[\\\\\\\\'\\\"]*(?:(?:[ef][\\\\\\\\'\\\"]*)?g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|d[\\\\\\\\'\\\"]*(?:i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|e[\\\\\\\\'\\\"]*c)|c[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*t|m[\\\\\\\\'\\\"]*p)|l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|(?:\\s|<|>).*)|a[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*s|t[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*m|x[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*(?:\\s|<|>).*)|z[\\\\\\\\'\\\"]*(?:(?:[ef][\\\\\\\\'\\\"]*)?g[\\\\\\\\'\\\"]*r[\\\\\\
\\'\\\"]*e[\\\\\\\\'\\\"]*p|c[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*t|m[\\\\\\\\'\\\"]*p)|d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|i[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*(?:\\s|<|>).*|l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|r[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*n|s[\\\\\\\\'\\\"]*h)|o[\\\\\\\\'\\\"]*(?:p[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*l|n[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*r)|w[\\\\\\\\'\\\"]*(?:h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*i|(?:\\s|<|>).*)|g[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t|3[\\\\\\\\'\\\"]*m)|v[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*(?:m[\\\\\\\\'\\\"]*(?:\\s|<|>).*|g[\\\\\\\\'\\\"]*r|p[\\\\\\\\'\\\"]*w)|y[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*m)\\b\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932110) Executing operator \"Rx\" with param \"(?i)(?:;|\\{|\\||\\|\\||&|&&|\\n|\\r|`)\\s*[\\(,@\\'\\\"\\s]*(?:[\\w'\\\"\\./]+/|[\\\\\\\\'\\\"\\^]*\\w[\\\\\\\\'\\\"\\^]*:.*\\\\\\\\|[\\^\\.\\w '\\\"/\\\\\\\\]*\\\\\\\\)?[\\\"\\^]*(?:m[\\\"\\^]*(?:y[\\\"\\^]*s[\\\"\\^]*q[\\\"\\^]*l(?:[\\\"\\^]*(?:d[\\\"\\^]*u[\\\"\\^]*m[\\\"\\^]*p(?:[\\\"\\^]*s[\\\"\\^]*l[\\\"\\^]*o[\\\"\\^]*w)?|h[\\\"\\^]*o[\\\"\\^]*t[\\\"\\^]*c[\\\"\\^]*o[\\\"\\^]*p[\\\"\\^]*y|a[\\\"\\^]*d[\\\"\\^]*m[\\\"\\^]*i[\\\"\\^]*n|s[\\\"\\^]*h[\\\"\\^]*o[\\\"\\^]*w))?|s[\\\"\\^]*(?:i[\\\"\\^]*(?:n[\\\"\\^]*f[\\\"\\^]*o[\\\"\\^]*3[\\\"\\^]*2|e[\\\"\\^]*x[\\\"\\^]*e[\\\"\\^]*c)|c[\\\"\\^]*o[\\\"\\^]*n[\\\"\\^]*f[\\\"\\^]*i[\\\"\\^]*g|g[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|t[\\\"\\^]*s[\\\"\\^]*c)|o[\\\"\\^]*(?:u[\\\"\\^]*n[\\\"\\^]*t[\\\"\\^]*(?:(?:[\\s,;]|\\.|/|<|>).*|v[\\\"\\^]*o[\\\"\\^]*l)|v[\\\"\\^]*e[\\\"\\^]*u[\\\"\\^]*s[\\\"\\^]*e[\\\"\\^]*r|[dr][\\\"\\^]*e[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*)|k[\\\"\\^]*(?:d[\\\"\\^]*i[\\\"\\^]*r[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|l[\\\"\\^]*i[\\\"\\^]*n[\\\"\\^]*k)|d[\\\"\\^]*(?:s[\\\"\\^]*c[\\\"\\^]*h[\\\"\\^]*e[\\\"\\^]*d|(?:[\\s,;]|\\.|/|<|>).*)|a[\\\"\\^]*p[\\\"\\^]*i[\\\"\\^]*s[\\\"\\^]*e[\\\"\\^]*n[\\\"\\^]*d|b[\\\"\\^]*s[\\\"\\^]*a[\\\"\\^]*c[\\\"\\^]*l[\\\"\\^]*i|e[\\\"\\^]*a[\\\"\\^]*s[\\\"\\^]*u[\\\"\\^]*r[\\\"\\^]*e|m[\\\"\\^]*s[\\\"\\^]*y[\\\"\\^]*s)|d[\\\"\\^]*(?:i[\\\"\\^]*(?:s[\\\"\\^]*k[\\\"\\^]*(?:(?:m[\\\"\\^]*g[\\\"\\^]*m|p[\\\"\\^]*a[\\\"\\^]*r)[\\\"\\^]*t|s[\\\"\\^]*h[\\\"\\^]*a[\\\"\\^]*d[\\\"\\^]*o[\\\"\\^]*w)|r[\\\"\\^]*(?:(?:[\\s,;]|\\.|/|<|>).*|u[\\\"\\^]*s[\\\"\\^]*e)|f[\\\"\\^]*f[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*)|e[\\\"\\^]*(?:l[\\\"\\^]*(?:p[\\\"\\^]*r[\\\"\\^]*o[\\\"\\^]*f|t[\\\"\\^]*r[\\\"\\^]*e[\\\"\\^]*e|(?:[\\s,;]|\\.|/|<|>).*)|v[\\\"\\^]*(?:m[\\\"\\^]*g[\\\"\\^]*m[\\\"\\^]*t|c[\\\"\\^]*o[\\\"\\^]*n)|(?:f[\\\"\\^]*r[\\\"\\^]*a|b[\\\"\\^]*u)[\\\"\\^]*g)|s[\\\"\\^]*(?:a[\\\"\\^]*(?:c[\\\"\\^]*l[\\\"\\^]*s|d[\\\"\\^]*d)|q[\\\"\\^]*u[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*y|m[\\\"\\^]*o[\\\"\\^]*(?:v[\\\"\\^]*e|d)|g[\\\"\\^]*e[\\\"\\^]*t|r[\\\"\\^]*m)|(?:r[\\\"\\^]*i[\\\"\\^]*v[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*q[\\\"\\^]*u[\\\"\\^]*e[\\\"\\^]*r|o[\\\"\\^]*s[\\\"\\^]*k[\\\"\\^]*e)[\\\"\\^]*y|(?:c[\\\"\\^]*o[\\\"\\^]*m[\\\"\\^]*c[\\\"\\^]*n[\\\"\\^]*f|x[\\\"\\^]*d[\\\"\
\^]*i[\\\"\\^]*a)[\\\"\\^]*g|a[\\\"\\^]*t[\\\"\\^]*e[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|n[\\\"\\^]*s[\\\"\\^]*s[\\\"\\^]*t[\\\"\\^]*a[\\\"\\^]*t)|c[\\\"\\^]*(?:o[\\\"\\^]*(?:m[\\\"\\^]*(?:p[\\\"\\^]*(?:(?:a[\\\"\\^]*c[\\\"\\^]*t[\\\"\\^]*)?(?:[\\s,;]|\\.|/|<|>).*|m[\\\"\\^]*g[\\\"\\^]*m[\\\"\\^]*t)|e[\\\"\\^]*x[\\\"\\^]*p)|n[\\\"\\^]*(?:2[\\\"\\^]*p|v[\\\"\\^]*e)[\\\"\\^]*r[\\\"\\^]*t|p[\\\"\\^]*y)|l[\\\"\\^]*(?:e[\\\"\\^]*a[\\\"\\^]*(?:n[\\\"\\^]*m[\\\"\\^]*g[\\\"\\^]*r|r[\\\"\\^]*m[\\\"\\^]*e[\\\"\\^]*m)|u[\\\"\\^]*s[\\\"\\^]*t[\\\"\\^]*e[\\\"\\^]*r)|h[\\\"\\^]*(?:k[\\\"\\^]*(?:n[\\\"\\^]*t[\\\"\\^]*f[\\\"\\^]*s|d[\\\"\\^]*s[\\\"\\^]*k)|d[\\\"\\^]*i[\\\"\\^]*r[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*)|s[\\\"\\^]*(?:c[\\\"\\^]*(?:r[\\\"\\^]*i[\\\"\\^]*p[\\\"\\^]*t|c[\\\"\\^]*m[\\\"\\^]*d)|v[\\\"\\^]*d[\\\"\\^]*e)|e[\\\"\\^]*r[\\\"\\^]*t[\\\"\\^]*(?:u[\\\"\\^]*t[\\\"\\^]*i[\\\"\\^]*l|r[\\\"\\^]*e[\\\"\\^]*q)|a[\\\"\\^]*(?:l[\\\"\\^]*l[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|c[\\\"\\^]*l[\\\"\\^]*s)|m[\\\"\\^]*d(?:[\\\"\\^]*k[\\\"\\^]*e[\\\"\\^]*y)?|i[\\\"\\^]*p[\\\"\\^]*h[\\\"\\^]*e[\\\"\\^]*r|u[\\\"\\^]*r[\\\"\\^]*l)|f[\\\"\\^]*(?:o[\\\"\\^]*r[\\\"\\^]*(?:m[\\\"\\^]*a[\\\"\\^]*t[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|f[\\\"\\^]*i[\\\"\\^]*l[\\\"\\^]*e[\\\"\\^]*s|e[\\\"\\^]*a[\\\"\\^]*c[\\\"\\^]*h)|i[\\\"\\^]*n[\\\"\\^]*d[\\\"\\^]*(?:(?:[\\s,;]|\\.|/|<|>).*|s[\\\"\\^]*t[\\\"\\^]*r)|s[\\\"\\^]*(?:m[\\\"\\^]*g[\\\"\\^]*m[\\\"\\^]*t|u[\\\"\\^]*t[\\\"\\^]*i[\\\"\\^]*l)|t[\\\"\\^]*(?:p[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|y[\\\"\\^]*p[\\\"\\^]*e)|r[\\\"\\^]*e[\\\"\\^]*e[\\\"\\^]*d[\\\"\\^]*i[\\\"\\^]*s[\\\"\\^]*k|c[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|g[\\\"\\^]*r[\\\"\\^]*e[\\\"\\^]*p)|n[\\\"\\^]*(?:e[\\\"\\^]*t[\\\"\\^]*(?:s[\\\"\\^]*(?:t[\\\"\\^]*a[\\\"\\^]*t|v[\\\"\\^]*c|h)|(?:[\\s,;]|\\.|/|<|>).*|c[\\\"\\^]*a[\\\"\\^]*t|d[\\\"\\^]*o[\\\"\\^]*m)|t[\\\"\\^]*(?:b[\\\"\\^]*a[\\\"\\^]*c[\\\"\\^]*k[\\\"\\^]*u[\\\"\\^]*p|r[\\\"\\^]*i[\\\"\\^]*g[\\\"\\^]*h[\\\"\\^]*t[\\\"\\^]*s)|(?:s[\\\"\\^]*l[\\\"\\^]*o[\\\"\\^]*o[\\\"\\^]*k[\\\"\\^]*u|m[\\\"\\^]*a)[\\\"\\^]*p|c[\\\"\\^]*(?:(?:[\\s,;]|\\.|/|<|>).*|a[\\\"\\^]*t)|b[\\\"\\^]*t[\\\"\\^]*s[\\\"\\^]*t[\\\"\\^]*a[\\\"\\^]*t)|e[\\\"\\^]*(?:x[\\\"\\^]*p[\\\"\\^]*(?:a[\\\"\\^]*n[\\\"\\^]*d[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|l[\\\"\\^]*o[\\\"\\^]*r[\\\"\\^]*e[\\\"\\^]*r)|v[\\\"\\^]*e[\\\"\\^]*n[\\\"\\^]*t[\\\"\\^]*(?:c[\\\"\\^]*r[\\\"\\^]*e[\\\"\\^]*a[\\\"\\^]*t[\\\"\\^]*e|v[\\\"\\^]*w[\\\"\\^]*r)|n[\\\"\\^]*d[\\\"\\^]*l[\\\"\\^]*o[\\\"\\^]*c[\\\"\\^]*a[\\\"\\^]*l|g[\\\"\\^]*r[\\\"\\^]*e[\\\"\\^]*p|r[\\\"\\^]*a[\\\"\\^]*s[\\\"\\^]*e|c[\\\"\\^]*h[\\\"\\^]*o)|g[\\\"\\^]*(?:a[\\\"\\^]*t[\\\"\\^]*h[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*n[\\\"\\^]*e[\\\"\\^]*t[\\\"\\^]*w[\\\"\\^]*o[\\\"\\^]*r[\\\"\\^]*k[\\\"\\^]*i[\\\"\\^]*n[\\\"\\^]*f[\\\"\\^]*o|p[\\\"\\^]*(?:(?:r[\\\"\\^]*e[\\\"\\^]*s[\\\"\\^]*u[\\\"\\^]*l|e[\\\"\\^]*d[\\\"\\^]*i)[\\\"\\^]*t|u[\\\"\\^]*p[\\\"\\^]*d[\\\"\\^]*a[\\\"\\^]*t[\\\"\\^]*e)|i[\\\"\\^]*t[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|e[\\\"\\^]*t[\\\"\\^]*m[\\\"\\^]*a[\\\"\\^]*c)|i[\\\"\\^]*(?:r[\\\"\\^]*b(?:[\\\"\\^]*(?:1(?:[\\\"\\^]*[89])?|2[\\\"\\^]*[012]))?|f[\\\"\\^]*m[\\\"\\^]*e[\\\"\\^]*m[\\\"\\^]*b[\\\"\\^]*e[\\\"\\^]*r|p[\\\"\\^]*c[\\\"\\^]*o[\\\"\\^]*n[\\\"\\^]*f[\\\"\\^]*i[\\\"\\^]*g|n[\\\"\\^]*e[\\\"\\^]*t[\\\"\\^]*c[\\\"\\^]*p[\\\"\\^]*l|c[\\\"\\^]*a[\\\"\\^]*c[\\\"\\^]*l[\\\"\\^]*s)|a[\\\"\\^]*(?:d[\\\"\\^]*(?:d[\\\"\\^]*u[\\\"\\^]*s[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*s|m[\\\"\\^]*o[\\\"\\^]*d[\\\"\\^]*c[\\\"\\^]*m[\\\"\\^]*d)|r[\\\"\\^]*p[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|t[\\\"\
\^]*t[\\\"\\^]*r[\\\"\\^]*i[\\\"\\^]*b|s[\\\"\\^]*s[\\\"\\^]*o[\\\"\\^]*c|z[\\\"\\^]*m[\\\"\\^]*a[\\\"\\^]*n)|l[\\\"\\^]*(?:o[\\\"\\^]*g[\\\"\\^]*(?:e[\\\"\\^]*v[\\\"\\^]*e[\\\"\\^]*n[\\\"\\^]*t|t[\\\"\\^]*i[\\\"\\^]*m[\\\"\\^]*e|m[\\\"\\^]*a[\\\"\\^]*n|o[\\\"\\^]*f[\\\"\\^]*f)|a[\\\"\\^]*b[\\\"\\^]*e[\\\"\\^]*l[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|u[\\\"\\^]*s[\\\"\\^]*r[\\\"\\^]*m[\\\"\\^]*g[\\\"\\^]*r)|b[\\\"\\^]*(?:(?:c[\\\"\\^]*d[\\\"\\^]*(?:b[\\\"\\^]*o[\\\"\\^]*o|e[\\\"\\^]*d[\\\"\\^]*i)|r[\\\"\\^]*o[\\\"\\^]*w[\\\"\\^]*s[\\\"\\^]*t[\\\"\\^]*a)[\\\"\\^]*t|i[\\\"\\^]*t[\\\"\\^]*s[\\\"\\^]*a[\\\"\\^]*d[\\\"\\^]*m[\\\"\\^]*i[\\\"\\^]*n|o[\\\"\\^]*o[\\\"\\^]*t[\\\"\\^]*c[\\\"\\^]*f[\\\"\\^]*g)|h[\\\"\\^]*(?:o[\\\"\\^]*s[\\\"\\^]*t[\\\"\\^]*n[\\\"\\^]*a[\\\"\\^]*m[\\\"\\^]*e|d[\\\"\\^]*w[\\\"\\^]*w[\\\"\\^]*i[\\\"\\^]*z)|j[\\\"\\^]*a[\\\"\\^]*v[\\\"\\^]*a[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|7[\\\"\\^]*z(?:[\\\"\\^]*[ar])?)(?:\\.[\\\"\\^]*\\w+)?\\b\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932115) Executing operator \"Rx\" with param \"(?i)(?:;|\\{|\\||\\|\\||&|&&|\\n|\\r|`)\\s*[\\(,@\\'\\\"\\s]*(?:[\\w'\\\"\\./]+/|[\\\\\\\\'\\\"\\^]*\\w[\\\\\\\\'\\\"\\^]*:.*\\\\\\\\|[\\^\\.\\w '\\\"/\\\\\\\\]*\\\\\\\\)?[\\\"\\^]*(?:s[\\\"\\^]*(?:y[\\\"\\^]*s[\\\"\\^]*(?:t[\\\"\\^]*e[\\\"\\^]*m[\\\"\\^]*(?:p[\\\"\\^]*r[\\\"\\^]*o[\\\"\\^]*p[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*t[\\\"\\^]*i[\\\"\\^]*e[\\\"\\^]*s[\\\"\\^]*(?:d[\\\"\\^]*a[\\\"\\^]*t[\\\"\\^]*a[\\\"\\^]*e[\\\"\\^]*x[\\\"\\^]*e[\\\"\\^]*c[\\\"\\^]*u[\\\"\\^]*t[\\\"\\^]*i[\\\"\\^]*o[\\\"\\^]*n[\\\"\\^]*p[\\\"\\^]*r[\\\"\\^]*e[\\\"\\^]*v[\\\"\\^]*e[\\\"\\^]*n[\\\"\\^]*t[\\\"\\^]*i[\\\"\\^]*o[\\\"\\^]*n|(?:p[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*f[\\\"\\^]*o[\\\"\\^]*r[\\\"\\^]*m[\\\"\\^]*a[\\\"\\^]*n[\\\"\\^]*c|h[\\\"\\^]*a[\\\"\\^]*r[\\\"\\^]*d[\\\"\\^]*w[\\\"\\^]*a[\\\"\\^]*r)[\\\"\\^]*e|a[\\\"\\^]*d[\\\"\\^]*v[\\\"\\^]*a[\\\"\\^]*n[\\\"\\^]*c[\\\"\\^]*e[\\\"\\^]*d)|i[\\\"\\^]*n[\\\"\\^]*f[\\\"\\^]*o)|k[\\\"\\^]*e[\\\"\\^]*y|d[\\\"\\^]*m)|h[\\\"\\^]*(?:o[\\\"\\^]*(?:w[\\\"\\^]*(?:g[\\\"\\^]*r[\\\"\\^]*p|m[\\\"\\^]*b[\\\"\\^]*r)[\\\"\\^]*s|r[\\\"\\^]*t[\\\"\\^]*c[\\\"\\^]*u[\\\"\\^]*t)|e[\\\"\\^]*l[\\\"\\^]*l[\\\"\\^]*r[\\\"\\^]*u[\\\"\\^]*n[\\\"\\^]*a[\\\"\\^]*s|u[\\\"\\^]*t[\\\"\\^]*d[\\\"\\^]*o[\\\"\\^]*w[\\\"\\^]*n|r[\\\"\\^]*p[\\\"\\^]*u[\\\"\\^]*b[\\\"\\^]*w|a[\\\"\\^]*r[\\\"\\^]*e|i[\\\"\\^]*f[\\\"\\^]*t)|e[\\\"\\^]*(?:t[\\\"\\^]*(?:(?:x[\\\"\\^]*)?(?:[\\s,;]|\\.|/|<|>).*|l[\\\"\\^]*o[\\\"\\^]*c[\\\"\\^]*a[\\\"\\^]*l)|c[\\\"\\^]*p[\\\"\\^]*o[\\\"\\^]*l|l[\\\"\\^]*e[\\\"\\^]*c[\\\"\\^]*t)|c[\\\"\\^]*(?:h[\\\"\\^]*t[\\\"\\^]*a[\\\"\\^]*s[\\\"\\^]*k[\\\"\\^]*s|l[\\\"\\^]*i[\\\"\\^]*s[\\\"\\^]*t)|u[\\\"\\^]*b[\\\"\\^]*(?:i[\\\"\\^]*n[\\\"\\^]*a[\\\"\\^]*c[\\\"\\^]*l|s[\\\"\\^]*t)|t[\\\"\\^]*a[\\\"\\^]*r[\\\"\\^]*t[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|i[\\\"\\^]*g[\\\"\\^]*v[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*i[\\\"\\^]*f|l[\\\"\\^]*(?:e[\\\"\\^]*e[\\\"\\^]*p|m[\\\"\\^]*g[\\\"\\^]*r)|o[\\\"\\^]*r[\\\"\\^]*t|f[\\\"\\^]*c|v[\\\"\\^]*n)|p[\\\"\\^]*(?:s[\\\"\\^]*(?:s[\\\"\\^]*(?:h[\\\"\\^]*u[\\\"\\^]*t[\\\"\\^]*d[\\\"\\^]*o[\\\"\\^]*w[\\\"\\^]*n|e[\\\"\\^]*r[\\\"\\^]*v[\\\"\\^]*i[\\\"\\^]*c[\\\"\\^]*e|u[\\\"\\^]*s[\\\"\\^]*p
[\\\"\\^]*e[\\\"\\^]*n[\\\"\\^]*d)|l[\\\"\\^]*(?:o[\\\"\\^]*g[\\\"\\^]*(?:g[\\\"\\^]*e[\\\"\\^]*d[\\\"\\^]*o[\\\"\\^]*n|l[\\\"\\^]*i[\\\"\\^]*s[\\\"\\^]*t)|i[\\\"\\^]*s[\\\"\\^]*t)|p[\\\"\\^]*(?:a[\\\"\\^]*s[\\\"\\^]*s[\\\"\\^]*w[\\\"\\^]*d|i[\\\"\\^]*n[\\\"\\^]*g)|g[\\\"\\^]*e[\\\"\\^]*t[\\\"\\^]*s[\\\"\\^]*i[\\\"\\^]*d|e[\\\"\\^]*x[\\\"\\^]*e[\\\"\\^]*c|f[\\\"\\^]*i[\\\"\\^]*l[\\\"\\^]*e|i[\\\"\\^]*n[\\\"\\^]*f[\\\"\\^]*o|k[\\\"\\^]*i[\\\"\\^]*l[\\\"\\^]*l)|o[\\\"\\^]*(?:w[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*(?:s[\\\"\\^]*h[\\\"\\^]*e[\\\"\\^]*l[\\\"\\^]*l(?:[\\\"\\^]*_[\\\"\\^]*i[\\\"\\^]*s[\\\"\\^]*e)?|c[\\\"\\^]*f[\\\"\\^]*g)|r[\\\"\\^]*t[\\\"\\^]*q[\\\"\\^]*r[\\\"\\^]*y|p[\\\"\\^]*d)|r[\\\"\\^]*(?:i[\\\"\\^]*n[\\\"\\^]*t[\\\"\\^]*(?:(?:[\\s,;]|\\.|/|<|>).*|b[\\\"\\^]*r[\\\"\\^]*m)|n[\\\"\\^]*(?:c[\\\"\\^]*n[\\\"\\^]*f[\\\"\\^]*g|m[\\\"\\^]*n[\\\"\\^]*g[\\\"\\^]*r)|o[\\\"\\^]*m[\\\"\\^]*p[\\\"\\^]*t)|a[\\\"\\^]*t[\\\"\\^]*h[\\\"\\^]*(?:p[\\\"\\^]*i[\\\"\\^]*n[\\\"\\^]*g|(?:[\\s,;]|\\.|/|<|>).*)|e[\\\"\\^]*r[\\\"\\^]*(?:l(?:[\\\"\\^]*(?:s[\\\"\\^]*h|5))?|f[\\\"\\^]*m[\\\"\\^]*o[\\\"\\^]*n)|y[\\\"\\^]*t[\\\"\\^]*h[\\\"\\^]*o[\\\"\\^]*n(?:[\\\"\\^]*(?:3(?:[\\\"\\^]*m)?|2))?|k[\\\"\\^]*g[\\\"\\^]*m[\\\"\\^]*g[\\\"\\^]*r|h[\\\"\\^]*p(?:[\\\"\\^]*[57])?|u[\\\"\\^]*s[\\\"\\^]*h[\\\"\\^]*d|i[\\\"\\^]*n[\\\"\\^]*g)|r[\\\"\\^]*(?:e[\\\"\\^]*(?:(?:p[\\\"\\^]*l[\\\"\\^]*a[\\\"\\^]*c[\\\"\\^]*e|n(?:[\\\"\\^]*a[\\\"\\^]*m[\\\"\\^]*e)?|s[\\\"\\^]*e[\\\"\\^]*t)[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|g[\\\"\\^]*(?:s[\\\"\\^]*v[\\\"\\^]*r[\\\"\\^]*3[\\\"\\^]*2|e[\\\"\\^]*d[\\\"\\^]*i[\\\"\\^]*t|(?:[\\s,;]|\\.|/|<|>).*|i[\\\"\\^]*n[\\\"\\^]*i)|c[\\\"\\^]*(?:d[\\\"\\^]*i[\\\"\\^]*s[\\\"\\^]*c|o[\\\"\\^]*v[\\\"\\^]*e[\\\"\\^]*r)|k[\\\"\\^]*e[\\\"\\^]*y[\\\"\\^]*w[\\\"\\^]*i[\\\"\\^]*z)|u[\\\"\\^]*(?:n[\\\"\\^]*(?:d[\\\"\\^]*l[\\\"\\^]*l[\\\"\\^]*3[\\\"\\^]*2|a[\\\"\\^]*s)|b[\\\"\\^]*y[\\\"\\^]*(?:1(?:[\\\"\\^]*[89])?|2[\\\"\\^]*[012]))|a[\\\"\\^]*(?:s[\\\"\\^]*(?:p[\\\"\\^]*h[\\\"\\^]*o[\\\"\\^]*n[\\\"\\^]*e|d[\\\"\\^]*i[\\\"\\^]*a[\\\"\\^]*l)|r[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*)|m[\\\"\\^]*(?:(?:d[\\\"\\^]*i[\\\"\\^]*r[\\\"\\^]*)?(?:[\\s,;]|\\.|/|<|>).*|t[\\\"\\^]*s[\\\"\\^]*h[\\\"\\^]*a[\\\"\\^]*r[\\\"\\^]*e)|o[\\\"\\^]*(?:u[\\\"\\^]*t[\\\"\\^]*e[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|b[\\\"\\^]*o[\\\"\\^]*c[\\\"\\^]*o[\\\"\\^]*p[\\\"\\^]*y)|s[\\\"\\^]*(?:t[\\\"\\^]*r[\\\"\\^]*u[\\\"\\^]*i|y[\\\"\\^]*n[\\\"\\^]*c)|d[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*)|t[\\\"\\^]*(?:a[\\\"\\^]*(?:s[\\\"\\^]*k[\\\"\\^]*(?:k[\\\"\\^]*i[\\\"\\^]*l[\\\"\\^]*l|l[\\\"\\^]*i[\\\"\\^]*s[\\\"\\^]*t|s[\\\"\\^]*c[\\\"\\^]*h[\\\"\\^]*d|m[\\\"\\^]*g[\\\"\\^]*r)|k[\\\"\\^]*e[\\\"\\^]*o[\\\"\\^]*w[\\\"\\^]*n)|(?:i[\\\"\\^]*m[\\\"\\^]*e[\\\"\\^]*o[\\\"\\^]*u|p[\\\"\\^]*m[\\\"\\^]*i[\\\"\\^]*n[\\\"\\^]*i|e[\\\"\\^]*l[\\\"\\^]*n[\\\"\\^]*e|l[\\\"\\^]*i[\\\"\\^]*s)[\\\"\\^]*t|s[\\\"\\^]*(?:d[\\\"\\^]*i[\\\"\\^]*s[\\\"\\^]*c[\\\"\\^]*o|s[\\\"\\^]*h[\\\"\\^]*u[\\\"\\^]*t[\\\"\\^]*d)[\\\"\\^]*n|y[\\\"\\^]*p[\\\"\\^]*e[\\\"\\^]*(?:p[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*f|(?:[\\s,;]|\\.|/|<|>).*)|r[\\\"\\^]*(?:a[\\\"\\^]*c[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*t|e[\\\"\\^]*e))|w[\\\"\\^]*(?:i[\\\"\\^]*n[\\\"\\^]*(?:d[\\\"\\^]*i[\\\"\\^]*f[\\\"\\^]*f|m[\\\"\\^]*s[\\\"\\^]*d[\\\"\\^]*p|v[\\\"\\^]*a[\\\"\\^]*r|r[\\\"\\^]*[ms])|u[\\\"\\^]*(?:a[\\\"\\^]*(?:u[\\\"\\^]*c[\\\"\\^]*l[\\\"\\^]*t|p[\\\"\\^]*p)|s[\\\"\\^]*a)|s[\\\"\\^]*c[\\\"\\^]*(?:r[\\\"\\^]*i[\\\"\\^]*p[\\\"\\^]*t|u[\\\"\\^]*i)|e[\\\"\\^]*v[\\\"\\^]*t[\\\"\\^]*u[\\\"\\^]*t[\\\"\\^]*i[\\\"\\^]*l|m[\\\"\\^]*i[\\\"\\^]*(?:
m[\\\"\\^]*g[\\\"\\^]*m[\\\"\\^]*t|c)|a[\\\"\\^]*i[\\\"\\^]*t[\\\"\\^]*f[\\\"\\^]*o[\\\"\\^]*r|h[\\\"\\^]*o[\\\"\\^]*a[\\\"\\^]*m[\\\"\\^]*i|g[\\\"\\^]*e[\\\"\\^]*t)|u[\\\"\\^]*(?:s[\\\"\\^]*(?:e[\\\"\\^]*r[\\\"\\^]*a[\\\"\\^]*c[\\\"\\^]*c[\\\"\\^]*o[\\\"\\^]*u[\\\"\\^]*n[\\\"\\^]*t[\\\"\\^]*c[\\\"\\^]*o[\\\"\\^]*n[\\\"\\^]*t[\\\"\\^]*r[\\\"\\^]*o[\\\"\\^]*l[\\\"\\^]*s[\\\"\\^]*e[\\\"\\^]*t[\\\"\\^]*t[\\\"\\^]*i[\\\"\\^]*n[\\\"\\^]*g[\\\"\\^]*s|r[\\\"\\^]*s[\\\"\\^]*t[\\\"\\^]*a[\\\"\\^]*t)|n[\\\"\\^]*(?:r[\\\"\\^]*a[\\\"\\^]*r|z[\\\"\\^]*i[\\\"\\^]*p))|q[\\\"\\^]*(?:u[\\\"\\^]*e[\\\"\\^]*r[\\\"\\^]*y[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|p[\\\"\\^]*r[\\\"\\^]*o[\\\"\\^]*c[\\\"\\^]*e[\\\"\\^]*s[\\\"\\^]*s|w[\\\"\\^]*i[\\\"\\^]*n[\\\"\\^]*s[\\\"\\^]*t[\\\"\\^]*a|g[\\\"\\^]*r[\\\"\\^]*e[\\\"\\^]*p)|o[\\\"\\^]*(?:d[\\\"\\^]*b[\\\"\\^]*c[\\\"\\^]*(?:a[\\\"\\^]*d[\\\"\\^]*3[\\\"\\^]*2|c[\\\"\\^]*o[\\\"\\^]*n[\\\"\\^]*f)|p[\\\"\\^]*e[\\\"\\^]*n[\\\"\\^]*f[\\\"\\^]*i[\\\"\\^]*l[\\\"\\^]*e[\\\"\\^]*s)|v[\\\"\\^]*(?:o[\\\"\\^]*l[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*|e[\\\"\\^]*r[\\\"\\^]*i[\\\"\\^]*f[\\\"\\^]*y)|x[\\\"\\^]*c[\\\"\\^]*(?:a[\\\"\\^]*c[\\\"\\^]*l[\\\"\\^]*s|o[\\\"\\^]*p[\\\"\\^]*y)|z[\\\"\\^]*i[\\\"\\^]*p[\\\"\\^]*(?:[\\s,;]|\\.|/|<|>).*)(?:\\.[\\\"\\^]*\\w+)?\\b\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932120) Executing operator \"PmFromFile\" with param \"windows-powershell-commands.data\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932130) Executing operator \"Rx\" with param \"(?:\\$(?:\\((?:\\(.*\\)|.*)\\)|\\{.*\\})|[<>]\\(.*\\))\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932140) Executing operator \"Rx\" with param \"\\b(?:if(?:/i)?(?: not)?(?: exist\\b| defined\\b| errorlevel\\b| cmdextversion\\b|(?: |\\().*(?:\\bgeq\\b|\\bequ\\b|\\bneq\\b|\\bleq\\b|\\bgtr\\b|\\blss\\b|==))|for(?:/[dflr].*)? 
%+[^ ]+ in\\(.*\\)\\s?do)\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932150) Executing operator \"Rx\" with param \"(?:^|=)\\s*(?:{|\\s*\\(\\s*|\\w+=(?:[^\\s]*|\\$.*|\\$.*|<.*|>.*|\\'.*\\'|\\\".*\\\")\\s+|!\\s*|\\$)*\\s*(?:'|\\\")*(?:[\\?\\*\\[\\]\\(\\)\\-\\|+\\w'\\\"\\./\\\\\\\\]+/)?[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*(?:s(?:[\\\\\\\\'\\\"]*(?:b[\\\\\\\\'\\\"]*_[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*e|c[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*u|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*d|p[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*i|u[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*b|-[\\\\\\\\'\\\"]*F|o[\\\\\\\\'\\\"]*f))?|z[\\\\\\\\'\\\"]*(?:(?:[ef][\\\\\\\\'\\\"]*)?g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|c[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*t|m[\\\\\\\\'\\\"]*p)|m[\\\\\\\\'\\\"]*(?:o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|a)|d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s)|e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*(?:(?:f[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l|p[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*p)[\\\\\\\\'\\\"]*e|e[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o)|a[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*g(?:[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n)?|c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*m)|w[\\\\\\\\'\\\"]*p(?:[\\\\\\\\'\\\"]*-[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*w[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*d)?|f[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*p(?:[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t)?|y[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*x)|s[\\\\\\\\'\\\"]*(?:e[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*(?:e[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*v|s[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*d)|n[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l|d)|h(?:[\\\\\\\\'\\\"]*\\.[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*b)?|o[\\\\\\\\'\\\"]*(?:u[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e|c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t)|t[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*s|y[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*l|c[\\\\\\\\'\\\"]*(?:h[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*d|p)|d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|f[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*p|u[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*o|s[\\\\\\\\'\\\"]*h|v[\\\\\\\\'\\\"]*n)|p[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*r(?:[\\\\\\\\'\\\"]*(?:d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p))?|y[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n(?:[\\\\\\\\'\\\"]*(?:3(?:[\\\\\\\\'\\\"]*m)?|2))?|k[\\\\\\\\'\\\"]*(?:e[\\\\\\\\'\\\"]*x[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*c|i[\\\\\\\\'\\\"]*l[\\
\\\\\\'\\\"]*l)|r[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*v|(?:g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|f[\\\\\\\\'\\\"]*t)[\\\\\\\\'\\\"]*p|e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*l(?:[\\\\\\\\'\\\"]*5)?|h[\\\\\\\\'\\\"]*p(?:[\\\\\\\\'\\\"]*[57])?|i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*g|o[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*d)|n[\\\\\\\\'\\\"]*(?:c(?:[\\\\\\\\'\\\"]*(?:\\.[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*l|o[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*b[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*d)|a[\\\\\\\\'\\\"]*t))?|e[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*(?:k[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*-[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*p|(?:s[\\\\\\\\'\\\"]*t|c)[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t)|o[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*p|p[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*g|s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t)|t[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*(?:p[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e|i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*g)|s[\\\\\\\\'\\\"]*h)|r[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e(?:[\\\\\\\\'\\\"]*6)?|i[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*e(?:[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*t)?|a[\\\\\\\\'\\\"]*(?:i[\\\\\\\\'\\\"]*l(?:[\\\\\\\\'\\\"]*f)?|r)|e[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t)|r[\\\\\\\\'\\\"]*(?:e[\\\\\\\\'\\\"]*(?:p[\\\\\\\\'\\\"]*(?:l[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*e|e[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t)|a[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*h|n[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*e)|u[\\\\\\\\'\\\"]*b[\\\\\\\\'\\\"]*y(?:[\\\\\\\\'\\\"]*(?:1(?:[\\\\\\\\'\\\"]*[89])?|2[\\\\\\\\'\\\"]*[012]))?|m[\\\\\\\\'\\\"]*(?:u[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*e|d[\\\\\\\\'\\\"]*i)[\\\\\\\\'\\\"]*r|n[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*o|s[\\\\\\\\'\\\"]*y[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*c|c[\\\\\\\\'\\\"]*p)|b[\\\\\\\\'\\\"]*(?:z[\\\\\\\\'\\\"]*(?:(?:[ef][\\\\\\\\'\\\"]*)?g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t)|s[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t|i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*r)|u[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n|a[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*h)|m[\\\\\\\\'\\\"]*(?:y[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*q[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*(?:d[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*p(?:[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*w)?|h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*y|a[\\\\\\\\'\\\"]*d[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n|s[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*w)|l[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e|a[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*q)|u[\\\\\\\\
'\\\"]*(?:n[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|l[\\\\\\\\'\\\"]*z[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*a|a[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*e|r[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*r|s[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t|z[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*p|x[\\\\\\\\'\\\"]*z)|s[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*(?:(?:a[\\\\\\\\'\\\"]*d|m[\\\\\\\\'\\\"]*o)[\\\\\\\\'\\\"]*d|d[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*l))|x[\\\\\\\\'\\\"]*(?:z(?:[\\\\\\\\'\\\"]*(?:(?:[ef][\\\\\\\\'\\\"]*)?g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|d[\\\\\\\\'\\\"]*(?:i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|e[\\\\\\\\'\\\"]*c)|c[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*t|m[\\\\\\\\'\\\"]*p)|l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e))?|a[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*g[\\\\\\\\'\\\"]*s)|z[\\\\\\\\'\\\"]*(?:(?:(?:[ef][\\\\\\\\'\\\"]*)?g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|i)[\\\\\\\\'\\\"]*p|c[\\\\\\\\'\\\"]*(?:a[\\\\\\\\'\\\"]*t|m[\\\\\\\\'\\\"]*p)|d[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*s|m[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e|r[\\\\\\\\'\\\"]*u[\\\\\\\\'\\\"]*n|s[\\\\\\\\'\\\"]*h)|f[\\\\\\\\'\\\"]*(?:t[\\\\\\\\'\\\"]*p[\\\\\\\\'\\\"]*(?:s[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*s|w[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o)|i[\\\\\\\\'\\\"]*l[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*t|e[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*c[\\\\\\\\'\\\"]*h|g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p)|c[\\\\\\\\'\\\"]*(?:o[\\\\\\\\'\\\"]*(?:m[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*d|p[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*c)|u[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*l|s[\\\\\\\\'\\\"]*h|c)|e[\\\\\\\\'\\\"]*(?:g[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|c[\\\\\\\\'\\\"]*h[\\\\\\\\'\\\"]*o|v[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*l|x[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*c|n[\\\\\\\\'\\\"]*v)|d[\\\\\\\\'\\\"]*(?:m[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*g|a[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*h|i[\\\\\\\\'\\\"]*f[\\\\\\\\'\\\"]*f|o[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*s)|g[\\\\\\\\'\\\"]*(?:z[\\\\\\\\'\\\"]*(?:c[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*t|i[\\\\\\\\'\\\"]*p)|r[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*p|c[\\\\\\\\'\\\"]*c)|j[\\\\\\\\'\\\"]*(?:o[\\\\\\\\'\\\"]*b[\\\\\\\\'\\\"]*s[\\\\\\\\'\\\"]*\\s+[\\\\\\\\'\\\"]*-[\\\\\\\\'\\\"]*x|a[\\\\\\\\'\\\"]*v[\\\\\\\\'\\\"]*a)|w[\\\\\\\\'\\\"]*(?:h[\\\\\\\\'\\\"]*o[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*m[\\\\\\\\'\\\"]*i|g[\\\\\\\\'\\\"]*e[\\\\\\\\'\\\"]*t|3[\\\\\\\\'\\\"]*m)|i[\\\\\\\\'\\\"]*r[\\\\\\\\'\\\"]*b(?:[\\\\\\\\'\\\"]*(?:1(?:[\\\\\\\\'\\\"]*[89])?|2[\\\\\\\\'\\\"]*[012]))?|o[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*n[\\\\\\\\'\\\"]*t[\\\\\\\\'\\\"]*r|h[\\\\\\\\'\\\"]*(?:e[\\\\\\\\'\\\"]*a[\\\\\\\\'\\\"]*d|u[\\\\\\\\'\\\"]*p)|v[\\\\\\\\'\\\"]*i[\\\\\\\\'\\\"]*(?:g[\\\\\\\\'\\\"]*r|p[\\\\\\\\'\\\"]*w)|G[\\\\\\\\'\\\"]*E[\\\\\\\\'\\\"]*T)[\\\\\\\\'\\\"]*(?:\\s|;|\\||&|<|>)\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] 
(Rule: 932160) Executing operator \"PmFromFile\" with param \"unix-shell.data\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalizePath: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalizePath: \">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932170) Executing operator \"Rx\" with param \"^\\(\\s*\\)\\s+{\" against REQUEST_HEADERS|REQUEST_LINE.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"localhost:8080\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"*/*\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"GET /?q=\"> HTTP/1.1\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"GET /?q=\"> HTTP/1.1\" (Variable: REQUEST_LINE)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932171) Executing operator \"Rx\" with param \"^\\(\\s*\\)\\s+{\" against ARGS_NAMES|ARGS|FILES_NAMES.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932180) Executing operator \"PmFromFile\" with param \"restricted-upload.data\" against FILES|REQUEST_HEADERS:X-Filename|REQUEST_HEADERS:X_Filename|REQUEST_HEADERS:X-File-Name.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 932014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] 
Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '932016' due to a SecMarker: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '932106' due to a SecMarker: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '932190' due to a SecMarker: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '932018' due to a SecMarker: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-932-APPLICATION-ATTACK-RCE\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 5 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933100) Executing operator \"Rx\" with param \"(?:<\\?(?:[^x]|x[^m]|xm[^l]|xml[^\\s]|xml$|$)|<\\?php|\\[(?:\\/|\\\\\\\\)?php\\])\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933110) Executing operator \"Rx\" with param \".*\\.(?:php\\d*|phtml)\\.*$\" against FILES|REQUEST_HEADERS:X-Filename|REQUEST_HEADERS:X_Filename|REQUEST_HEADERS:X.Filename|REQUEST_HEADERS:X-File-Name.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933120) Executing operator \"PmFromFile\" with param \"php-config-directives.data\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalisePath: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalisePath: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: 
\"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933130) Executing operator \"PmFromFile\" with param \"php-variables.data\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalisePath: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:normalisePath: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933140) Executing operator \"Rx\" with param \"(?i)php://(?:std(?:in|out|err)|(?:in|out)put|fd|memory|temp|filter)\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933200) Executing operator \"Rx\" with param \"(?i:zlib|glob|phar|ssh2|rar|ogg|expect|zip)://\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cmdLine: \">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933150) Executing operator \"PmFromFile\" with param \"php-function-names-933150.data\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|REQUEST_FILENAME|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/\" (Variable: REQUEST_FILENAME)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:lowercase: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933160) Executing operator \"Rx\" with param 
\"(?i)\\b(?:s(?:e(?:t(?:_(?:e(?:xception|rror)_handler|magic_quotes_runtime|include_path)|defaultstub)|ssion_s(?:et_save_handler|tart))|qlite_(?:(?:(?:unbuffered|single|array)_)?query|create_(?:aggregate|function)|p?open|exec)|tr(?:eam_(?:context_create|socket_client)|ipc?slashes|rev)|implexml_load_(?:string|file)|ocket_c(?:onnect|reate)|h(?:ow_sourc|a1_fil)e|pl_autoload_register|ystem)|p(?:r(?:eg_(?:replace(?:_callback(?:_array)?)?|match(?:_all)?|split)|oc_(?:(?:terminat|clos|nic)e|get_status|open)|int_r)|o(?:six_(?:get(?:(?:e[gu]|g)id|login|pwnam)|mk(?:fifo|nod)|ttyname|kill)|pen)|hp(?:_(?:strip_whitespac|unam)e|version|info)|g_(?:(?:execut|prepar)e|connect|query)|a(?:rse_(?:ini_file|str)|ssthru)|utenv)|r(?:unkit_(?:function_(?:re(?:defin|nam)e|copy|add)|method_(?:re(?:defin|nam)e|copy|add)|constant_(?:redefine|add))|e(?:(?:gister_(?:shutdown|tick)|name)_function|ad(?:(?:gz)?file|_exif_data|dir))|awurl(?:de|en)code)|i(?:mage(?:createfrom(?:(?:jpe|pn)g|x[bp]m|wbmp|gif)|(?:jpe|pn)g|g(?:d2?|if)|2?wbmp|xbm)|s_(?:(?:(?:execut|write?|read)ab|fi)le|dir)|ni_(?:get(?:_all)?|set)|terator_apply|ptcembed)|g(?:et(?:_(?:c(?:urrent_use|fg_va)r|meta_tags)|my(?:[gpu]id|inode)|(?:lastmo|cw)d|imagesize|env)|z(?:(?:(?:defla|wri)t|encod|fil)e|compress|open|read)|lob)|a(?:rray_(?:u(?:intersect(?:_u?assoc)?|diff(?:_u?assoc)?)|intersect_u(?:assoc|key)|diff_u(?:assoc|key)|filter|reduce|map)|ssert(?:_options)?)|h(?:tml(?:specialchars(?:_decode)?|_entity_decode|entities)|(?:ash(?:_(?:update|hmac))?|ighlight)_file|e(?:ader_register_callback|x2bin))|f(?:i(?:le(?:(?:[acm]tim|inod)e|(?:_exist|perm)s|group)?|nfo_open)|tp_(?:nb_(?:ge|pu)|connec|ge|pu)t|(?:unction_exis|pu)ts|write|open)|o(?:b_(?:get_(?:c(?:ontents|lean)|flush)|end_(?:clean|flush)|clean|flush|start)|dbc_(?:result(?:_all)?|exec(?:ute)?|connect)|pendir)|m(?:b_(?:ereg(?:_(?:replace(?:_callback)?|match)|i(?:_replace)?)?|parse_str)|(?:ove_uploaded|d5)_file|ethod_exists|ysql_query|kdir)|e(?:x(?:if_(?:t(?:humbnail|agname)|imagetype|read_data)|ec)|scapeshell(?:arg|cmd)|rror_reporting|val)|c(?:url_(?:file_create|exec|init)|onvert_uuencode|reate_function|hr)|u(?:n(?:serialize|pack)|rl(?:de|en)code|[ak]?sort)|(?:json_(?:de|en)cod|debug_backtrac|tmpfil)e|b(?:(?:son_(?:de|en)|ase64_en)code|zopen)|var_dump)(?:\\s|/\\*.*\\*/|//.*|#.*)*\\(.*\\)\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|REQUEST_FILENAME|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/\" (Variable: REQUEST_FILENAME)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933170) Executing operator \"Rx\" with param \"[oOcC]:\\d+:\\\".+?\\\":\\d+:{.*}\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|REQUEST_HEADERS|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"localhost:8080\" (Variable: REQUEST_HEADERS:Host)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"*/*\" (Variable: REQUEST_HEADERS:Accept)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched 
vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933180) Executing operator \"Rx\" with param \"\\$+(?:[a-zA-Z_\\x7f-\\xff][a-zA-Z0-9_\\x7f-\\xff]*|\\s*{.+})(?:\\s|\\[.+\\]|{.+}|/\\*.*\\*/|//.*|#.*)*\\(.*\\)\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|REQUEST_FILENAME|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/\" (Variable: REQUEST_FILENAME)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933210) Executing operator \"Rx\" with param \"(?:(?:\\(|\\[)[a-zA-Z0-9_.$\\\"'\\[\\](){}/*\\s]+(?:\\)|\\])[0-9_.$\\\"'\\[\\](){}/*\\s]*\\([a-zA-Z0-9_.$\\\"'\\[\\](){}/*\\s].*\\)|\\([\\s]*string[\\s]*\\)[\\s]*(?:\\\"|'))\" against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|REQUEST_FILENAME|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:replaceComments: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:compressWhitespace: \"/\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"/\" (Variable: REQUEST_FILENAME)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:replaceComments: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:compressWhitespace: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:replaceComments: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:compressWhitespace: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 933014) Executing operator \"Lt\" with param \"2\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: skipAfter\r\n[157893220069.056272] [/?q=\">] [5] Setting skipAfter for: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: pass.\r\n[157893220069.056272] [/?q=\">] [8] Running action pass\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '933151' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '933016' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '933131' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id 
'933161' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '933111' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '933190' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '933018' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule:\r\n[157893220069.056272] [/?q=\">] [9] Skipped rule id '0' due to a SecMarker: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [9] Rule: END-REQUEST-933-APPLICATION-ATTACK-PHP\r\n[157893220069.056272] [/?q=\">] [4] Out of a SecMarker after skip 8 rules.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 941012) Executing operator \"Lt\" with param \"1\" against TX:EXECUTING_PARANOIA_LEVEL.\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"1\" (Variable: TX:EXECUTING_PARANOIA_LEVEL)\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 0.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars cleaned.\r\n[157893220069.056272] [/?q=\">] [4] (Rule: 941100) Executing operator \"DetectXSS against REQUEST_COOKIES|REQUEST_COOKIES_NAMES|REQUEST_HEADERS:User-Agent|ARGS_NAMES|ARGS|XML:/*.\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:jsDecode: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cssDecode: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"curl/7.54.0\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"curl/7.54.0\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[157893220069.056272] [/?q=\">] [9] libinjection was not able to find any XSS in: curl/7.54.0\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:jsDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cssDecode: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"q\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"q\" (Variable: ARGS_NAMES:q)\r\n[157893220069.056272] [/?q=\">] [9] libinjection was not able to find any XSS in: q\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:utf8toUnicode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:urlDecodeUni: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:htmlEntityDecode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:jsDecode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:cssDecode: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] T (0) t:removeNulls: \"\">\"\r\n[157893220069.056272] [/?q=\">] [9] Target value: \"\">\" (Variable: ARGS:q)\r\n[157893220069.056272] [/?q=\">] [5] detected XSS using libinjection.\r\n[157893220069.056272] [/?q=\">] [9] Matched vars updated.\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:xss_score with value: 5\r\n[157893220069.056272] [/?q=\">] [4] Running [independent] (non-disruptive) action: 
setvar\r\n[157893220069.056272] [/?q=\">] [8] Saving variable: TX:anomaly_score_pl1 with value: 5\r\n[157893220069.056272] [/?q=\">] [9] This rule severity is: 2 current transaction is: 2\r\n[157893220069.056272] [/?q=\">] [9] Saving msg: XSS Attack Detected via libinjection\r\n[157893220069.056272] [/?q=\">] [4] Rule returned 1.\r\n[157893220069.056272] [/?q=\">] [9] Running action: nolog\r\n[157893220069.056272] [/?q=\">] [9] Running action: auditlog\r\n[157893220069.056272] [/?q=\">] [9] Saving transaction to logs\r\n[157893220069.056272] [/?q=\">] [9] Running action: status\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: application-multi\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: language-multi\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: platform-multi\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: attack-xss\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: paranoia-level/1\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: OWASP_CRS\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: OWASP_CRS/WEB_ATTACK/XSS\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: WASCTC/WASC-8\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: WASCTC/WASC-22\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: OWASP_TOP_10/A3\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: OWASP_AppSensor/IE1\r\n[157893220069.056272] [/?q=\">] [4] Running (non-disruptive) action: tag\r\n[157893220069.056272] [/?q=\">] [9] Rule tag: CAPEC-242\r\n[157893220069.056272] [/?q=\">] [4] Running (disruptive) action: block.\r\n[157893220069.056272] [/?q=\">] [8] Marking request as disruptive.\r\n[157893220069.056272] [/?q=\">] [8] Running action deny\r\n[157893220069.056272] [/?q=\">] [9] Running action: ctl\r\n[157893220069.056272] [/?q=\">] [8] Skipping this phase as this request was already intercepted.\r\n[157893220069.056272] [/?q=\">] [4] Not appending response body. Response Content-Type is . It is not marked to be inspected.\r\n[157893220069.056272] [/?q=\">] [4] Not appending response body. Response Content-Type is . It is not marked to be inspected.\r\n[157893220069.056272] [/?q=\">] [4] Starting phase RESPONSE_BODY. (SecRules 4)\r\n[157893220069.056272] [/?q=\">] [5] Response Content-Type is . It is not marked to be inspected.\r\n[157893220069.056272] [/?q=\">] [8] Content-Type(s) marked to be inspected: text/html text/plain text/xml`\r\n\r\n/var/log/nginx/error.log\r\n`2020/01/13 16:20:12 [error] 6#6: *4 [client 172.17.0.1] ModSecurity: Access denied with code 403 (phase 2). detected XSS using libinjection. 
[file \"/usr/local/owasp-modsecurity-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\"] [line \"37\"] [id \"941100\"] [rev \"\"] [msg \"XSS Attack Detected via libinjection\"] [data \"Matched Data: XSS data found within ARGS:q: \">\"] [severity \"2\"] [ver \"OWASP_CRS/3.2.0\"] [maturity \"0\"] [accuracy \"0\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-xss\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"OWASP_CRS/WEB_ATTACK/XSS\"] [tag \"WASCTC/WASC-8\"] [tag \"WASCTC/WASC-22\"] [tag \"OWASP_TOP_10/A3\"] [tag \"OWASP_AppSensor/IE1\"] [tag \"CAPEC-242\"] [hostname \"172.17.0.3\"] [uri \"/\"] [unique_id \"157893241261.889121\"] [ref \"v8,27t:utf8toUnicode,t:urlDecodeUni,t:htmlEntityDecode,t:jsDecode,t:cssDecode,t:removeNulls\"], client: 172.17.0.1, server: juice.sky, request: \"GET /?q=\"> HTTP/1.1\", host: \"localhost:8080\"`\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n\r\nEnable RuleEngine and audit logs in modsecurity.conf,\r\n\r\n```\r\nSecRuleEngine On\r\nSecAuditEngine On\r\nSecAuditLogParts ABIJDEFHZ\r\nSecAuditLogType Serial\r\nSecAuditLogFormat JSON\r\nSecAuditLog /var/log/modsec_audit.log\r\n```\r\n\r\n\r\nA **curl** command line that mimics the original request and reproduces the problem. Or a ModSecurity v3 test case.\r\n\r\n[e.g: curl \"modsec-full/ca/..\\\\..\\\\..\\\\..\\\\..\\\\..\\\\/\\\\etc/\\\\passwd\" or [issue-394.json](https://github.com/SpiderLabs/ModSecurity/blob/v3/master/test/test-cases/regression/issue-394.json)]\r\n\r\n`curl 'http://localhost:8080/?q=\">'` \r\n\r\n**Expected behavior**\r\n\r\nA clear and concise description of what you expected to happen.\r\n\r\nBlock logged in /var/log/modsec_audit.log\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): [e.g. ModSecurity v3.0.1 with nginx-connector v1.0.0]\r\n - WebServer: [e.g. nginx-1.15.5]\r\n - OS (and distro): [e.g. Linux, archlinux]\r\n\r\nModSecurity version v3.0.4 with nginx-connector v1.0.1\r\nnginx-1.17.6\r\nLinux, Debian 10 (Buster)\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? [e.g. SpiderLabs commercial rules]\r\n - What is the version number? [e.g. 2018-08-11]\r\n\r\nOWASP v3.3\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n`root@8f378ae6cecd:/# cat /usr/local/owasp-modsecurity-crs/crs-setup.conf\r\n# Set block by default (Won't block if SecRuleEngine is in detectiononly or off)\r\nSecDefaultAction \"phase:1,nolog,auditlog,deny,status:403\"\r\nSecDefaultAction \"phase:2,nolog,auditlog,deny,status:403\"\r\n\r\n# Set installed CRS version\r\nSecAction \\\r\n \"id:900990,\\\r\n phase:1,\\\r\n nolog,\\\r\n pass,\\\r\n t:none,\\\r\n setvar:tx.crs_setup_version=330\"\r\n\r\n# Set timeout to lower value\r\nSecCollectionTimeout 600`\r\n", + "summary": "Log XSS attacks in ModSecurity audit log.\n\n**Issue Overview**\nTest a WAF setup with NGINX, ModSecurity, and OWASP ruleset by executing a simple XSS attack using CURL. The attack gets blocked with a 403 status code. However, it is only logged in the NGINX error log, not in the ModSecurity audit log. 
This needs to be rectified to ensure that all blocked requests are logged for SIEM integration.\n\n**Technical Details**\n- The current ModSecurity configuration uses `SecRuleEngine On`, `SecAuditEngine On`, and `SecAuditLogParts ABIJDEFHZ`, but the blocks are not appearing in the audit log.\n- The relevant CURL command executed is:\n ```\n curl 'http://localhost:8080/?q=\">'\n ```\n- ModSecurity version: v3.0.4 with NGINX connector v1.0.1.\n- NGINX version: 1.17.6.\n- Operating system: Debian 10 (Buster).\n- OWASP ruleset version: v3.3.\n \n**Current Log Output**\n- The error log shows that an XSS attack was detected but does not log it into the audit log:\n ```\n [error] ModSecurity: Access denied with code 403 ... detected XSS using libinjection ... matched Data: XSS data found within ARGS:q: \">\"\n ```\n\n**Expected Behavior**\n- The blocked request should be recorded in `/var/log/modsec_audit.log` to facilitate monitoring and alerting.\n\n**Proposed Steps to Resolve**\n1. **Check ModSecurity Rules**: Review the relevant rules that are supposed to log the blocked requests. Ensure that the action to log is not overridden by any other configurations.\n \n2. **Audit Log Configuration**: Ensure the audit log configuration is correctly set to capture all blocked requests:\n ```apache\n SecAuditLogParts ABIJDEFHZ\n SecAuditLogType Serial\n SecAuditLogFormat JSON\n SecAuditLog /var/log/modsec_audit.log\n ```\n Confirm that `SecAuditLogType` is set to `Serial` to avoid possible logging issues.\n\n3. **Default Action Configuration**: The default actions for both phases should include logging:\n ```apache\n SecDefaultAction \"phase:1,nolog,auditlog,deny,status:403\"\n SecDefaultAction \"phase:2,nolog,auditlog,deny,status:403\"\n ```\n Review if any action changes are preventing logging.\n\n4. **Test with Different Payloads**: Use various payloads to ensure the logging works for different cases. It might be worth testing with payloads that are known to trigger logging actions.\n\n5. **Review ModSecurity Debug Logs**: Set the debug log level to a higher setting (e.g., level 9) to capture detailed information about rule processing and actions taken. This will help identify why the audit log isn't capturing the blocks as expected.\n\n6. **Consult Documentation**: Review ModSecurity documentation or community forums for similar issues or any known bugs related to the version being used.\n\n7. **Update ModSecurity**: If all else fails and the issue persists, consider upgrading to the latest stable version of ModSecurity, as the issue might be resolved in newer versions.\n\nBy following these steps, ensure that all blocked requests are properly logged in the audit log for future analysis and monitoring.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2237", @@ -60461,10 +62567,10 @@ 940 ], "labels": [ - 399, 400, 404, - 413 + 413, + 399 ] } }, @@ -60477,6 +62583,7 @@ "node_id": "MDU6SXNzdWU1NzI4ODE5ODA=", "title": "SecRequestBodyAccess Off doesn't fully disable request body processing", "body": "Hi!\r\n\r\nWhy setting `SecRequestBodyAccess Off ` doesn't fully disable parsing request body and filling ModSecurity variables?\r\n\r\nMy case:\r\nI'm using ModSecurity with Nginx for proxying files as multipart/form-data. 
I don't want to process body content and save memory,\r\nso I set `SecRequestBodyAccess` to `Off ` and when I send a big file I get bad_alloc error and core dump:\r\n\r\n```\r\n(gdb) bt full\r\n#0 0x00007f939d2b9377 in raise () from /usr/lib64/libc.so.6\r\nNo symbol table info available.\r\n#1 0x00007f939d2baa68 in abort () from /usr/lib64/libc.so.6\r\nNo symbol table info available.\r\n#2 0x00007f939b74d7d5 in __gnu_cxx::__verbose_terminate_handler() ()\r\nfrom /usr/lib64/libstdc++.so.6\r\nNo symbol table info available.\r\n#3 0x00007f939b74b746 in ?? () from /usr/lib64/libstdc++.so.6\r\nNo symbol table info available.\r\n#4 0x00007f939b74b773 in std::terminate() () from /usr/lib64/libstdc++.so.6\r\nNo symbol table info available.\r\n#5 0x00007f939b74b993 in __cxa_throw () from /usr/lib64/libstdc++.so.6\r\nNo symbol table info available.\r\n#6 0x00007f939b74bf2d in operator new(unsigned long) () from /usr/lib64/libstdc++.so.6\r\nNo symbol table info available.\r\n#7 0x00007f939b7aaa19 in std::string::_Rep::_S_create(unsigned long, unsigned long, std::allocator const&) () from /usr/lib64/libstdc++.so.6\r\nNo symbol table info available.\r\n#8 0x00007f939b7aabd6 in std::string::_M_mutate(unsigned long, unsigned long, unsigned long) () from /usr/lib64/libstdc++.so.6\r\nNo symbol table info available.\r\n#9 0x00007f939b7ab19e in std::string::_M_replace_safe(unsigned long, unsigned long, char const*, unsigned long) () from /usr/lib64/libstdc++.so.6\r\nNo symbol table info available.\r\n#10 0x00007f939f364683 in modsecurity::AnchoredVariable::set(std::string const&, unsigned long) () at anchored_variable.cc:71\r\nNo locals.\r\n#11 0x00007f939f355155 in modsecurity::Transaction::processRequestBody() ()\r\nat transaction.cc:873\r\nPython Exception There is no member or method named _M_head_impl.: \r\na = \r\n^[[ fullRequest = \"Accept: */*\\nContent-Length: 1048586473\\nContent-Type: multipart/form-data; boundary=yBnB6soCHCHTWWhy6kHZ-T6lUqGnDkGXJdrd\\nConnection: Keep-Alive\\nUser-Agent: Apache-HttpClient/4.5\".. \r\nl = std::vector of length 7, capacity 8 = {0x9e6df0, 0x9e6d70, 0x9e6cf0, \r\n0x9e6c90, 0x9e6c30, 0x9e5a30, 0x9e5b40}\r\n#12 0x000000000055111f in ngx_http_modsecurity_pre_access_handler ()\r\nNo symbol table info available.\r\n```\r\n\r\n\r\nFor me it's seems like I could have to have setting to fully disable body processing.\r\n\r\nI've analyzed code a bit and found available memory checking condition:\r\nhttps://github.com/SpiderLabs/ModSecurity/blob/v3/master/src/transaction.cc#L977\r\n\r\nBut following REQUEST_BODY variable setting doubles occupied memory size what leads bad_alloc error:\r\nhttps://github.com/SpiderLabs/ModSecurity/blob/v3/master/src/anchored_variable.cc#L71\r\n\r\nIf it's a bug I could contribute and make pull request.\r\nBut now for me it's not really obvious what approach is better to fix it.\r\n", + "summary": "Disable full request body processing in ModSecurity by ensuring `SecRequestBodyAccess Off` functions correctly. Investigate why setting this directive does not prevent memory allocation for request body variables, leading to a `bad_alloc` error and core dump when handling large files.\n\n1. Confirm the `SecRequestBodyAccess` directive is correctly set in your configuration.\n2. Analyze the ModSecurity source code, particularly in `transaction.cc` and `anchored_variable.cc`, to identify areas that still allocate memory for the request body even when body access is disabled.\n3. 
Check the memory checking condition in the code (found in `transaction.cc` at line 977) to ensure it effectively prevents memory allocation under the current configuration.\n4. Test different request sizes and body types to reproduce the issue consistently and gather more debugging information.\n5. Consider implementing a patch that modifies the memory allocation logic to respect the `SecRequestBodyAccess Off` setting more effectively.\n6. If the problem persists, prepare to submit a detailed bug report or pull request to the ModSecurity repository, including your findings and any proposed code changes.\n\nBy following these steps, aim to fix the issue or clarify if it is indeed a bug that requires further development attention.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2273", @@ -60507,6 +62614,7 @@ "node_id": "MDU6SXNzdWU1NzY3OTQ4ODE=", "title": "Log output for JSON/XML error msg has an extra newline in it", "body": "**Describe the bug**\r\n\r\nThere is just a straight up newline in the error msg between XML/JSON parse error prints, example below:\r\n\r\n```\r\nModSecurity: Access denied with code 400 (phase 2). Matched \"Operator `Eq' with parameter `0' against variable `REQBODY_ERROR' (Value: `1' ) [file \"/ModSecurity/modsecurity.conf\"] [line \"14\"] [id \"200002\"] [rev \"\"] [msg \"Failed to parse request body.\"] [data \"JSON parsing error: parse error: trailing garbage\\x0a\"] [severity \"2\"] [ver \"\"] [maturity \"0\"] [accuracy \"0\"] [hostname \"10.129.99.196\"] [uri \"/F5/status\"] [unique_id \"158348520836.962520\"] [ref \"v456,1\"]\r\n```\r\n\r\nLatest version etc. The reason this was obnoxious was I for the longest time thought ModSec was telling me WHAT was in the payload that was the trailing garbage, being a newline(hex \\x0a) after the JSON or something lol ... which is actually valid JSON anyways according to the RFC. When really its just a message to inform you there is something wrong.\r\n\r\nSo to fix this issue please fix the error print log for json/xml parsing to not include the \r\n```[data \"JSON parsing error: parse error: trailing garbage\\x0a\"]``` print statement. So fixed would look like: ```[data \"JSON parsing error: parse error: trailing garbage\"]```\r\n\r\nFix is somewhere here, just need to strip the newline from the error var output of your 3rd party dependencies, I am C scrub so I leave it to the pros 😄 \r\n\r\nhttps://github.com/SpiderLabs/ModSecurity/blob/v3/master/src/transaction.cc#L839\r\nhttps://github.com/SpiderLabs/ModSecurity/blob/v3/master/src/transaction.cc#L814", + "summary": "Fix the JSON/XML error message logging in ModSecurity to eliminate the extra newline character. The current output includes a newline in the error message, which can mislead users into thinking the newline is part of the payload, rather than just a formatting issue.\n\n1. Review the code at the specified lines in `transaction.cc`:\n - Inspect line 839 for where the error message is constructed.\n - Check line 814 for any relevant function calls that may be affecting the output.\n\n2. Implement a solution to strip the newline character (`\\x0a`) from the error message before it is logged. Focus on modifying the string handling within the error reporting section.\n\n3. Test the changes to ensure the error messages are formatted correctly, without introducing any additional issues.\n\n4. Update the documentation or comments in the codebase to clarify the expected format of error messages post-fix. \n\n5. 
Submit a pull request with the implemented changes and include a description of the issue and the fix for reference.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2278", @@ -60537,6 +62645,7 @@ "node_id": "MDU6SXNzdWU1NzkyNzA5NTM=", "title": "@eq operator should produce a warning if used with non-numeric value", "body": "The @eq operator is currently intended to be used with numeric values. But, with a name like 'eq' a rule writer may inadvertently compose a rule with something like \"@eq foo\" ... which is unlikely to be what the rule writer intends.\r\n\r\nIt might be helpful if use of @eq with a non-numeric value produced at least a warning message at rule-load time.\r\n", + "summary": "Implement a warning mechanism for the @eq operator when it is used with non-numeric values. This will prevent rule writers from mistakenly using @eq with variables like \"@eq foo\", which are not intended for numeric comparison.\n\n1. Identify the current implementation of the @eq operator in the codebase.\n2. Modify the rule-loading process to check the type of the values being passed to @eq.\n3. If a non-numeric value is detected, generate a warning message indicating the misuse.\n4. Ensure the warning does not prevent the rule from loading but provides clear feedback for correction.\n5. Test the implementation with various non-numeric inputs to verify that the warning triggers correctly. \n\nBy following these steps, enhance the usability of the @eq operator and reduce logical errors in rule writing.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2281", @@ -60568,6 +62677,7 @@ "node_id": "MDU6SXNzdWU1OTI4MDE3MTE=", "title": "AuditLogs are logging binary data and getting huge", "body": "**Describe the bug**\r\nMy audit logs have begun logging binary data under the C section for request body.\r\nThis is causing my log files to grow upwards of 5GB each even though they rotate once daily.\r\n\r\nI am running the following system:\r\n\r\n> ModSecurity for Apache/2.9.2 (http://www.modsecurity.org/) configured. [-]\r\n> ModSecurity: APR compiled version=\"1.6.2\"; loaded version=\"1.6.3\"\r\n> ModSecurity: Loaded APR do not match with compiled!\r\n> ModSecurity: PCRE compiled version=\"8.39 \"; loaded version=\"8.39 2016-06-14\"\r\n> ModSecurity: LUA compiled version=\"Lua 5.1\"\r\n> ModSecurity: YAJL compiled version=\"2.1.0\"\r\n> ModSecurity: LIBXML compiled version=\"2.9.4\"\r\n\r\nI have numerous websites that are using ModSecurity through Apache but this only occurs with my installation of Nextcloud and only when very large video files (500MB and up) are uploaded from the Nextcloud app interface on my phone.\r\nIn my modsecurity.conf file I have the following set:\r\n`SecAuditLogParts ABDEFHIJZ`\r\nRemoving the `I` fixes the issue for me so I am guessing that somehow modsecurity isn't recognizing that these particular body requests are a file upload of binary data. Maybe because the data is sent in chunks. I don't know.\r\n\r\n**Logs and dumps**\r\n\r\nOutput of:\r\n 1. DebugLogs (level 9)\r\n 2. AuditLogs\r\n 3. Error logs\r\n 4. If there is a crash, the core dump file.\r\n\r\n_Notice:_ Be carefully to not leak any confidential information.\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n\r\nA **curl** command line that mimics the original request and reproduces the problem. 
Or a ModSecurity v3 test case.\r\n\r\n[e.g: curl \"modsec-full/ca/..\\\\..\\\\..\\\\..\\\\..\\\\..\\\\/\\\\etc/\\\\passwd\" or [issue-394.json](https://github.com/SpiderLabs/ModSecurity/blob/v3/master/test/test-cases/regression/issue-394.json)]\r\n\r\n\r\n**Expected behavior**\r\n\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): libapache2-mod-security2 2.9.2-1 \r\n - WebServer: Apache 2.4.29\r\n - OS (and distro): Ubuntu 18.04.4\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\nOWASP ModSecurity Core Rule Set ver.3.2.0\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n", + "summary": "Audit the logging of binary data in ModSecurity. \n\n**Identify the Issue**:\n- Recognize that audit logs are capturing binary data in the request body, leading to excessive log file sizes (over 5GB).\n- Confirm that the problem occurs with uploads of large video files (500MB+) through the Nextcloud app interface.\n- Note the current configuration line: `SecAuditLogParts ABDEFHIJZ`, where removing the `I` resolves the issue.\n\n**Technical Details**:\n- System specifications: \n - ModSecurity version: 2.9.2\n - Apache version: 2.4.29\n - OS: Ubuntu 18.04.4\n - Installed libraries: APR (1.6.3), PCRE (8.39), LUA (5.1), YAJL (2.1.0), LIBXML (2.9.4).\n- The log files grow large due to the binary data from file uploads that ModSecurity may not interpret correctly, especially if the data is sent in chunks.\n\n**First Steps to Tackle the Problem**:\n1. Investigate the ModSecurity configuration to understand how it handles binary data in uploads.\n2. Test with different `SecAuditLogParts` settings to identify if other configurations can prevent binary data logging without removing necessary parts.\n3. Create a curl command that mimics the file upload to reproduce the issue and analyze the ModSecurity debug logs at level 9 for insights.\n4. Review ModSecurity and Apache error logs for any related warnings or errors during the upload process.\n5. 
Consider updating ModSecurity or its dependencies if there's a known issue with the current versions.\n\n**Next Steps**:\n- After gathering data from the steps above, analyze the results and adjust the configuration to optimize logging without losing critical information.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2289", @@ -60598,6 +62708,7 @@ "node_id": "MDU6SXNzdWU2MTIxNTQzOTM=", "title": "MS3 atomic updates of persistent integer variables doesnt work in MT environment", "body": "**Describe the bug**\r\n\r\nI added the following rule to OWASP CRS v3.0.2 running in Modsecurity 3.\r\n\r\nSecRule &ARGS \"@eq 0\" \"id:942001, phase:2,nolog,pass, setvar:'resource.xcount=+1'\"\r\n\r\nand the following configuration to initialize a resource collection:\r\n\r\nSecAction \"id:90003,phase:1,nolog,pass,initcol:resource=%{REQUEST_HEADERS:Host}\"\r\n\r\n\r\nExamining LMDB after pushing 1000s of requests that match this rule through modsec shows:\r\n - for single threaded modsec, the counter is always correct\r\n - for multithreaded modsec, the counter is off by small amount\r\n\r\n\r\nWhich indicates to me the read-modify-write of the counter is not atomic and some threads are very occasionally pushing back an incremented obsolete counter value.\r\n\r\nPer https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v2.x)#persistent-storage - seems I should be able to atomically update a persistent storage variable in a modsec3 multithreaded environment. \r\n\r\nI am running libmodsecurity v3.0.4 with nginx 1.15.8 and v1.0.1 of the nginx modsec connector. libmodsecurity and the LMDB consumer are both built with Version: LMDB 0.9.24: (July 24, 2019)\r\n\r\nIf I send 1000s of requests to nginx+modsec serially via ' ab -n 5000 -c 1 -H \"Host: www.yssltest.com\" http://127.0.0.1:80/index.html', the counter is correct after all requests are processed. ab's report matches the number of nginx access log entries, matches the counter in LMDB.\r\n\r\nIf I use \"ab -n 5000 -c 10 -H \"Host: www.yssltest.com\" (sending requests on 10 concurrent connections) ab's report matches the number of nginx access log entries but the counter in LMDB is behind by a small amount (usually ~5 lost counts for 5000 requests).\r\n\r\nThe host is a 4 core system, nginx is configured for a master process, 4 worker processes, and 1024 connections.\r\n\r\n**Logs and dumps**\r\n\r\nOutput of:\r\n 1. DebugLogs (level 9)\r\n 2. AuditLogs\r\n 3. Error logs\r\n 4. If there is a crash, the core dump file.\r\n\r\n_Notice:_ Be carefully to not leak any confidential information.\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n\r\nA **curl** command line that mimics the original request and reproduces the problem. Or a ModSecurity v3 test case.\r\n\r\n[e.g: curl \"modsec-full/ca/..\\\\..\\\\..\\\\..\\\\..\\\\..\\\\/\\\\etc/\\\\passwd\" or [issue-394.json](https://github.com/SpiderLabs/ModSecurity/blob/v3/master/test/test-cases/regression/issue-394.json)]\r\n\r\n\r\n**Expected behavior**\r\n\r\nI expected the counter in persistent collection in LMDB to match the number of rule hits both in a single thread environment and a multithreaded environment. 
\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity v3.0.4 with nginx-connector v1.0.1]\r\n - nginx-1.15.8\r\n - LMDB 0.9.24: (July 24, 2019)\r\n - host is CentOS7 7.6.1810\r\n\r\n**Rule Set (please complete the following information):**\r\n - OWAS CRS v3.0.2\r\n\r\n**Additional context**\r\n\r\nab output showing 1000 requests:\r\n\r\nmike@centos7:~$ ab -n 1000 -c 10 -H \"Host: www.waftest.com\" http://127.0.0.1:80/index.html\r\nThis is ApacheBench, Version 2.3 <$Revision: 1430300 $>\r\nCopyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/\r\nLicensed to The Apache Software Foundation, http://www.apache.org/\r\n\r\nBenchmarking 127.0.0.1 (be patient)\r\nCompleted 100 requests\r\nCompleted 200 requests\r\nCompleted 300 requests\r\nCompleted 400 requests\r\nCompleted 500 requests\r\nCompleted 600 requests\r\nCompleted 700 requests\r\nCompleted 800 requests\r\nCompleted 900 requests\r\nCompleted 1000 requests\r\nFinished 1000 requests\r\n\r\n\r\nServer Software: nginx/1.15.8\r\nServer Hostname: 127.0.0.1\r\nServer Port: 80\r\n\r\nDocument Path: /index.html\r\nDocument Length: 612 bytes\r\n\r\nConcurrency Level: 10\r\nTime taken for tests: 25.736 seconds\r\nComplete requests: 1000\r\nFailed requests: 0\r\nWrite errors: 0\r\nTotal transferred: 845000 bytes\r\nHTML transferred: 612000 bytes\r\nRequests per second: 38.86 [#/sec] (mean)\r\nTime per request: 257.365 [ms] (mean)\r\nTime per request: 25.736 [ms] (mean, across all concurrent requests)\r\nTransfer rate: 32.06 [Kbytes/sec] received\r\n\r\nConnection Times (ms)\r\n min mean[+/-sd] median max\r\nConnect: 0 0 0.0 0 0\r\nProcessing: 54 256 129.1 220 916\r\nWaiting: 15 166 103.6 130 839\r\nTotal: 54 256 129.1 220 916\r\n\r\nPercentage of the requests served within a certain time (ms)\r\n 50% 220\r\n 66% 273\r\n 75% 320\r\n 80% 348\r\n 90% 437\r\n 95% 520\r\n 98% 596\r\n 99% 654\r\n 100% 916 (longest request)\r\nmike@centos7:~$\r\n\r\n\r\nNGINX access logs showing 1000 processed requests, all receiving 200/OK:\r\nmike@centos7:/opt/lmdb_test$ sudo rm /var/log/nginx/access.log\r\n... 
test runs...\r\nmike@centos7:/opt/lmdb_test$ sudo wc -l /var/log/nginx/access.log\r\n1000 /var/log/nginx/access.log\r\n\r\nAll lines in access log are \"[04/May/2020:16:xx:xx -0400] - www.waftest.com - 127.0.0.1 \"GET /index.html HTTP/1.0\" 200 \"-\" \"ApacheBench/2.3\" - - 0.000\"\r\n\r\n\r\nNGINX error logs:\r\nno entries\r\n\r\nRead of LMDB after MT run, counter is off by 1 (it reads 999):\r\n\r\nmike@centos7:/opt/ModSecurity$ sudo /opt/lmdb_test/miketest /opt/ModSecurity-nginx/modsec-shared-collections\r\nEnvironment Info\r\n Map address: 0x7f123b817000\r\n Map size: 10485760\r\n Page size: 4096\r\n Max pages: 2560\r\n Number of pages used: 8\r\n Last transaction ID: 7000\r\n Max readers: 126\r\n Number of readers used: 0\r\n Version: LMDB 0.9.24: (July 24, 2019)\r\n...\r\nkey: 0x7f123b819fe4 www.waftest.com::::xcount, data: 0x7f123b819ffd 999\r\n\r\n\r\nHere is the counter added to CRS REQUEST-942-APPLICATION-ATTACK-SQLI.conf (complete rule file uploaded):\r\n[REQUEST-942-APPLICATION-ATTACK-SQLI.conf.txt](https://github.com/SpiderLabs/ModSecurity/files/4577260/REQUEST-942-APPLICATION-ATTACK-SQLI.conf.txt)\r\n\r\nSecRule &ARGS \"@eq 0\" \"id:942001,\\\r\n phase:2,nolog,pass,\\\r\n setvar:'tx.xstart=%{DURATION}',\\\r\n setvar:'resource.xcount=+1'\"\r\n\r\n\r\n\r\nCollection is initialized in modsecurity.conf (complete modsec conf uploaded):\r\n\r\n[modsecurity.conf.txt](https://github.com/SpiderLabs/ModSecurity/files/4577276/modsecurity.conf.txt)\r\nSecAction \"id:90003,phase:1,nolog,pass,initcol:resource=%{REQUEST_HEADERS:Host}\"\r\n", + "summary": "**Investigate and Resolve Non-Atomic Updates of Persistent Integer Variables in MT Environment**\n\n**Problem Overview:**\nPersistent integer variables in ModSecurity's OWASP CRS v3.0.2 are not updating correctly in a multithreaded (MT) environment. When using the `setvar` directive to increment a counter based on a rule match, discrepancies occur in the counter values, indicating that the read-modify-write operation is not atomic. In single-threaded scenarios, the counter remains accurate, while in multithreaded tests, it shows a small deficit in counts.\n\n**Technical Details:**\n- Environment: \n - ModSecurity v3.0.4 \n - Nginx v1.15.8 \n - LMDB 0.9.24 \n - CentOS7 7.6.1810\n- Issue occurs when running concurrent requests with Apache Benchmark (ab) with command:\n ```\n ab -n 5000 -c 10 -H \"Host: www.yssltest.com\" http://127.0.0.1:80/index.html\n ```\n- The expected behavior is that the counter in LMDB should match the number of hits from the rule across both single-threaded and multithreaded executions. However, in MT, the counter often reads 999 after 1000 requests.\n\n**Initial Steps for Resolution:**\n1. **Review Documentation:** Verify the capabilities of persistent storage updates in a multithreaded context as referenced in the ModSecurity documentation. Ensure that the configuration aligns with expected atomic operations.\n2. **Debug Logging:** Increase the logging verbosity to capture detailed transaction logs during concurrent requests. Focus on the sequence of `setvar` executions.\n3. **Test with Different Versions:** Experiment with newer versions of ModSecurity or LMDB to determine if the issue persists. Updates may include bug fixes that address atomicity in MT environments.\n4. **Implement Mutexes:** Explore implementing mutexes within the ModSecurity rule processing logic to ensure that increments to the counter are thread-safe.\n5. 
**Run Isolated Tests:** Isolate the variable updates in a controlled environment to reproduce the issue consistently. Use custom scripts to simulate concurrent requests and log the counter changes.\n6. **Reach Out for Support:** If the problem continues, consider opening a discussion or support ticket with the ModSecurity development community for additional insights.\n\n**Follow-Up Actions:**\n- Document findings and any modifications made during testing.\n- Prepare test cases that illustrate the problem for further analysis.\n- Monitor any changes made to the codebase or configurations that could affect atomic operations in future versions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2306", @@ -60628,6 +62739,7 @@ "node_id": "MDU6SXNzdWU2MjQ4OTc5Mjg=", "title": "SecRuleRemoveById conflict with other Sec*", "body": "**Describe the bug**\r\nI created a configuration, based on [this article](https://johnleach.co.uk/posts/2012/05/15/rate-limiting-with-apache-and-mod-security/), to rate limit a specific url. I'm using apache + modsecurity. This configuration works fine, but I noticed that if I have any SecRuleRemoveById directive in any of my *.conf files, the rate limit doesn't work anymore.\r\n\r\n**Logs and dumps and To Reproduce**\r\nIt's works:\r\n```\r\nSecRuleEngine On\r\nSecStatusEngine On\r\n\r\n SecAction initcol:ip=%{X-Forwarded-For},pass,nolog,id:11\r\n SecAction \"phase:5,deprecatevar:ip.somepathcounter=1/1,pass,nolog,id:12\"\r\n SecRule IP:SOMEPATHCOUNTER \"@gt 60\" \"phase:2,pause:300,deny,status:429,setenv:RATELIMITED,skip:1,nolog,id:13\"\r\n SecAction \"phase:2,pass,setvar:ip.somepathcounter=+1,expirevar:ip.somepathcounter=600,nolog,id:14\"\r\n Header always set Retry-After \"10\" env=RATELIMITED\r\n\r\n ProxyPass \"http://influxdb:8086\"\r\n ProxyPassReverse \"http://influxdb:8086\"\r\n \r\n\r\nErrorDocument 429 \"Rate Limit Exceeded\"\r\n````\r\nThe output of my test script is: \r\n```\r\n58 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n59 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n60 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n61 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n62 - Status code: 429 - Rate Limit Exceeded\r\n63 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n64 - Status code: 429 - Rate Limit Exceeded\r\n65 - Status code: 429 - Rate Limit Exceeded\r\n66 - Status code: 429 - Rate Limit Exceeded\r\n````\r\n\r\nIt doesn't work:\r\n````\r\nSecRuleEngine On\r\nSecStatusEngine On\r\n\r\n SecAction initcol:ip=%{X-Forwarded-For},pass,nolog,id:11\r\n SecAction \"phase:5,deprecatevar:ip.somepathcounter=1/1,pass,nolog,id:12\"\r\n SecRule IP:SOMEPATHCOUNTER \"@gt 60\" \"phase:2,pause:300,deny,status:429,setenv:RATELIMITED,skip:1,nolog,id:13\"\r\n SecAction \"phase:2,pass,setvar:ip.somepathcounter=+1,expirevar:ip.somepathcounter=600,nolog,id:14\"\r\n Header always set Retry-After \"10\" env=RATELIMITED\r\n\r\n ProxyPass \"http://influxdb:8086\"\r\n ProxyPassReverse \"http://influxdb:8086\"\r\n \r\n\r\nErrorDocument 429 \"Rate Limit Exceeded\"\r\n\r\nInclude /etc/apache2/conf-available/modsecurity_rules.conf\r\n````\r\nContent of modsecurity_rules.conf:\r\n````\r\nSecRuleRemoveById 932130 \r\nSecRuleRemoveById 932100 \r\nSecRuleRemoveById 932105\r\nSecRuleRemoveById 932140\r\nSecRuleRemoveById 942220\r\nSecRuleRemoveById 920180 \r\nSecRuleRemoveById 980130\r\nSecRuleRemoveById 920420\r\n````\r\nIf I comment ALL of these SecRuleRemoveById, the rate limit works. 
\r\nIf I uncomment ANY of these SecRuleRemoveById, the rate limit doesn't work. In this scenario, the output of my test script is: \r\n```\r\n58 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n59 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n60 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n61 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n62 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n63 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n64 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n65 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n66 - Status code: 200 - {\"version\":\"1.8.0\"}\r\n`````\r\n**Expected behavior**\r\n - SecRuleRemoveById directive shouldn't affect the rate limit script.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): [v2.9.3 with modSecurity-CRS: 3.2.0-1]\r\n - WebServer: [apache v2.4.43]\r\n - OS (and distro): [Linux Ubuntu Focal (20.04), running in Docker]\r\n\r\n**Rule Set (please complete the following information):**\r\n - modSecurity-CRS: 3.2.0-1\r\n\r\n**Additional context**\r\nI develop a script in python to test the rate limit:\r\n```\r\nimport requests \r\nURL = \"https://localhost/influxdb/ping\"\r\nPARAMS = {'verbose':'true'} \r\nfor x in range(10000):\r\n r = requests.get(url = URL, params = PARAMS, verify=False) \r\n print(str(x) + ' - Status code: ' + str(r.status_code) + ' - ' + r.text)\r\nprint('Finish')\r\n````", + "summary": "**Identify and Resolve SecRuleRemoveById Conflict with Rate Limiting in Apache + ModSecurity**\n\n**Determine the Issue:**\nInvestigate why the rate limiting configuration for a specific URL in your Apache + ModSecurity setup ceases to function when any `SecRuleRemoveById` directive is included in the configuration. Confirm that when all `SecRuleRemoveById` lines are commented out, the rate limiting works correctly.\n\n**Technical Analysis:**\n1. **Review the configuration context**: Ensure that the `SecRuleRemoveById` directives do not inadvertently disable rules necessary for the rate limiting logic to execute. The rate limiting setup involves several key rules, particularly the one that checks if `IP:SOMEPATHCOUNTER` exceeds 60.\n \n2. **Log analysis**: Check the ModSecurity logs for any indications of the rules being triggered or skipped when the `SecRuleRemoveById` directives are active. This may provide insight into whether specific rules are preventing the desired behavior.\n\n3. **Rule dependencies**: Analyze the ModSecurity rules that are being removed. Understand which of the removed rules might interact with the rate limiting logic. Pay special attention to any rules that involve IP tracking, variable handling, or request throttling.\n\n**First Steps to Tackle the Problem:**\n1. **Isolate the removed rules**: Start by commenting out all `SecRuleRemoveById` directives and then uncomment them one by one to pinpoint the specific rule(s) that cause the conflict.\n\n2. **Modify or add exceptions**: If a particular rule is identified as problematic, consider modifying that rule or adding an exception to ensure it does not interfere with the functioning of your rate limiting logic. \n\n3. **Test incremental changes**: After adjusting the configuration, use the provided Python script to test the rate limit functionality, ensuring that the status codes reflect the expected behavior.\n\n4. 
**Consult documentation**: Review the ModSecurity documentation for insights on how `SecRuleRemoveById` interacts with other rules, particularly those related to IP collection and variable management.\n\n5. **Community support**: If the issue remains unresolved, consider reaching out to the ModSecurity community or forums for additional support and shared experiences regarding similar configurations.\n\nBy following these steps, identify the cause of the conflict and restore the intended rate limiting functionality while maintaining the necessary security configurations.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2325", @@ -60645,8 +62757,8 @@ 939 ], "labels": [ - 398, - 403 + 403, + 398 ] } }, @@ -60659,6 +62771,7 @@ "node_id": "MDU6SXNzdWU2Mzk2NTg0NzY=", "title": "Timezone is not included in the time_stamp field of audit log in JSON format", "body": "**Description**\r\n\r\nTimezone is not included in the `time_stamp` field of audit log if `SecAuditLogFormat` is set to `JSON`\r\n\r\nIf `SecAuditLogFormat` is set to `Native`, the time and date are written to audit log like following:\r\n```\r\n---immbqR4e---A--\r\n[16/Jun/2020:11:24:03 +0300]\r\n```\r\nKeeping all other setting the same, but setting `SecAuditLogFormat` to `JSON`, the time and date are written to audit log like following:\r\n```\r\n\"time_stamp\":\"Tue Jun 16 11:24:03 2020\"\r\n```\r\nNote that it's not UTC time, it's local time on the server.\r\n\r\n**Steps to reproduce the behavior**\r\n\r\n1. Configure `SecAuditLogFormat` to `JSON`\r\n2. Make sure 'A' section is enabled in `SecAuditLogFormat` parameter. Ex.: `SecAuditLogParts ABJFHZ`\r\n3. Restart nginx to apply new settings\r\n4. Perform _any_ request to the web server that would get a new record put into audit log\r\n5. Check the audit log, specifically `time_stamp` field in the latest record\r\n\r\n**Expected behavior**\r\n\r\n- Either timezone is specified in `time_stamp` field when `SecAuditLogFormat` is set to `JSON`\r\n- Or `time_stamp` always contains UTC time and not local time\r\n- Or there's an option to set up `time_stamp` format in configuration file (which I couldn't find)\r\n\r\n**Rationale**\r\n\r\nImagine modsecurity audit logs are shipped to ELK or other log management system from multiple servers, including those located in regions with with daylight saving time. Then there's no common way to correctly parse the `time_stamp` field given that different servers might be in different timezone and also timezone is not persistent for some of them.\r\n\r\n**Server**\r\n - ModSecurity v3.0.4 with nginx-connector v1.0.1\r\n - WebServer: ngingx-1.18.0-1~bionic\r\n - OS: Ubuntu 18.04.4\r\n", + "summary": "Include timezone in the `time_stamp` field of the audit log when `SecAuditLogFormat` is set to `JSON`. The current implementation outputs the `time_stamp` without timezone information, leading to issues when aggregating logs from multiple servers with different time zones.\n\n### Steps to tackle the problem:\n\n1. **Review the Codebase:**\n - Investigate the source code responsible for generating the JSON audit log, focusing on the `time_stamp` field.\n - Identify where the timestamp is formatted and outputted.\n\n2. **Modify Timestamp Formatting:**\n - Adjust the code to include timezone information in the `time_stamp`. Options could be:\n - Append the server's timezone to the timestamp.\n - Convert the timestamp to UTC and include it.\n\n3. 
**Implement Configuration Option:**\n - Consider adding a configuration directive to allow users to choose their preferred timestamp format (with timezone, UTC, etc.).\n\n4. **Testing:**\n - Create test cases to ensure that timestamps are correctly formatted with timezone information.\n - Validate that logs from servers in different time zones are correctly parsed when ingested into log management systems like ELK.\n\n5. **Documentation:**\n - Update the documentation to reflect any new configuration options or changes in behavior.\n\n6. **Community Feedback:**\n - Engage with the community for feedback on the proposed changes and ensure it meets various use cases.\n\n### Expected Outcome:\nEnsure consistent and correct timestamp representation in audit logs across different server setups to facilitate better log management and analysis.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2340", @@ -60676,9 +62789,9 @@ 977 ], "labels": [ + 409, 396, - 399, - 409 + 399 ] } }, @@ -60691,6 +62804,7 @@ "node_id": "MDU6SXNzdWU2Mzk3NDIyNDk=", "title": "PATCH method dies with mod security with nginx configured as proxy/ingress", "body": "Version info:\r\n\r\nversion: libmodsecurity.so.3.0.3\r\n\r\nname: nginx-ingress\r\nrepository: https://kubernetes-charts.storage.googleapis.com\r\nversion: 1.33.5\r\nkubectl version\r\nServer Version: version.Info{Major:\"1\", Minor:\"16\", GitVersion:\"v1.16.9\", GitCommit:\"a17149e1a189050796ced469dbd78d380f2ed5ef\", GitTreeState:\"clean\", BuildDate:\"2020-04-16T23:15:50Z\", GoVersion:\"go1.13.9\", Compiler:\"gc\", Platform:\"linux/amd64\"}\r\n\r\n===> cross-post from https://github.com/kubernetes/ingress-nginx/issues/5723\r\n\r\nSummary Observations:\r\nPATCH method with JSON request body sent to ingress receives no response bytes on connection and connection is left open.\r\n\r\nConnection is closed only when NGINX ingress is bounced due to server reload (on change of ConfigMap, pod deletion ... etc)\r\n\r\nwhen Debug logging is enabled with\r\nSecDebugLog /dev/stdout\r\nSecDebugLogLevel 4\r\n\r\nthe debug log shows that phase 1 of the Modsecurity (a) recognized the \"application/json\", and that (b) phase 2 assigned the JSON attributes into ARGS for subsequent scanning.\r\n\r\n** Changing from \"DetectOnly\" to rule enforcement does not change the behavior. Adding the CRS ruleset does not change the behavior.\r\n\r\n** POST and PUT methods work without hanging the connection\r\n** PATCH method works correctly when enable-modsecurity: \"false\"\r\n\r\n====================================\r\n", + "summary": "Investigate the issue with the PATCH method hanging when using ModSecurity with NGINX configured as a proxy/ingress. Ensure to replicate the environment with the following specifications:\n\n- Use libmodsecurity version 3.0.3.\n- Deploy NGINX ingress version 1.33.5.\n- Confirm Kubernetes server version is v1.16.9.\n\n### Steps to Diagnose and Resolve:\n\n1. **Verify Configuration**: Check the ModSecurity configuration to ensure proper directives are set for handling the PATCH method.\n\n2. **Enable Debug Logging**: Keep the debug logging active by setting:\n ```\n SecDebugLog /dev/stdout\n SecDebugLogLevel 4\n ```\n Analyze the logs for any anomalies during the request processing, especially for PATCH requests.\n\n3. **Test Behavior**: \n - Send a PATCH request with a JSON body and observe the response. 
Note that the connection remains open.\n - Conduct similar tests with POST and PUT methods to confirm they operate normally.\n\n4. **Review ModSecurity Rules**: \n - Ensure that changing the rule enforcement from \"DetectOnly\" to active enforcement does not alter the behavior.\n - Validate the impact of adding the Core Rule Set (CRS) on PATCH requests.\n\n5. **Check for Known Issues**: Review the linked issue on GitHub for any similar reports or additional insights.\n\n6. **Consider Disabling ModSecurity**: Temporarily set `enable-modsecurity: \"false\"` to confirm that the PATCH method works without ModSecurity interference.\n\n7. **Experiment with Custom Rules**: If necessary, modify or create custom ModSecurity rules to handle PATCH requests specifically.\n\nBy following these steps, you should be able to isolate the issue and potentially implement a fix or workaround for the hanging PATCH method in your NGINX ingress setup.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2341", @@ -60706,9 +62820,9 @@ "repository": 1213, "assignees": [], "labels": [ - 399, + 409, 404, - 409 + 399 ] } }, @@ -60721,6 +62835,7 @@ "node_id": "MDU6SXNzdWU2NDIwMzE0NDI=", "title": "Memory pointer error in JSON sanitization", "body": "In json_add_argument(), we calculate the offset of the data to sanitize the following way:\r\n arg->value_origin_offset = value - base_offset;\r\n'value' is a parameter, 'base_offset' is a pointer containing the original string to sanitize.\r\nUsually, 'value' is a pointer in the original string pointed by 'base_offset'.\r\nBUT, when unescaping the string in yajl_do_parse() on line 253, we unescape it in a new string (hand->decodeBuf->data) and pass this new string - that is in a totally different memory space - to the yajl_string() function, passing it to json_add_argument().\r\n\r\nImpact: this could lead to a major memory corruption (putting '*' characters in an invalid part of memory)\r\n\r\nSolution: we need to pass the original string to yajl_string().\r\nHopefully, unescaping the string is always shorter, so we can copy the result inline:\r\n```\r\n- _CC_CHK(hand->callbacks->yajl_string(\r\n hand->ctx, yajl_buf_data(hand->decodeBuf),\r\n yajl_buf_len(hand->decodeBuf)));\r\n+ bufLen = yajl_buf_len(hand->decodeBuf);\r\n+ strcpy(buf, yajl_buf_data(hand->decodeBuf));\r\n+ _CC_CHK(hand->callbacks->yajl_string(hand->ctx, buf, bufLen));\r\n```\r\n\r\nAs this modifies the original input string, we need to copy it before parsing it.\r\nIn msc_json.c in json_process_chunk():\r\n```\r\n- base_offset = buf;\r\n+ base_offset = apr_pstrmemdup(msr->mp, buf, size);\r\n+ if (!base_offset) return -1;\r\n\r\n /* Feed our parser and catch any errors */\r\n- msr->json->status = yajl_parse(msr->json->handle, buf, size);\r\n+ msr->json->status = yajl_parse(msr->json->handle, base_offset, size);\r\n```", + "summary": "Fix memory pointer error in JSON sanitization by ensuring correct handling of string pointers. \n\n1. Modify the function `json_add_argument()` to ensure that the original string is passed to `yajl_string()`. Instead of using the new unescaped string from `hand->decodeBuf`, copy the data into a buffer before passing it along.\n\n2. 
Implement the following code changes in `json_process_chunk()` within `msc_json.c`:\n - Change the assignment of `base_offset` to create a duplicate of the input string:\n ```c\n base_offset = apr_pstrmemdup(msr->mp, buf, size);\n if (!base_offset) return -1;\n ```\n - Ensure that the parser uses the duplicated string:\n ```c\n msr->json->status = yajl_parse(msr->json->handle, base_offset, size);\n ```\n\n3. In the decoding process, replace the current call to `yajl_string()` with the following:\n ```c\n bufLen = yajl_buf_len(hand->decodeBuf);\n strcpy(buf, yajl_buf_data(hand->decodeBuf));\n _CC_CHK(hand->callbacks->yajl_string(hand->ctx, buf, bufLen));\n ```\n\nBy following these steps, you will prevent memory corruption due to invalid pointer references. Ensure to test the changes thoroughly to verify that no new issues arise from these modifications.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2344", @@ -60738,8 +62853,8 @@ 977 ], "labels": [ - 398, - 400 + 400, + 398 ] } }, @@ -60752,6 +62867,7 @@ "node_id": "MDU6SXNzdWU2NTM5NTE5MDU=", "title": "Command nginx -t create file modsec-shared-collections in directory where it run", "body": "Nginx build with connector modsecurity-nginx. Modsecurity build with lmdb support (`--with-lmdb`)\r\n```\r\nserver /etc/nginx # ls -al\r\n...\r\n-rw-r--r-- 1 root root 3957 Aug 10 2017 mime.types\r\ndrwxr-xr-x 2 root root 4096 Feb 19 12:38 modsec\r\n-rw-r--r-- 1 root root 5494 Jan 23 20:55 nginx.conf\r\ndrwxr-xr-x 2 root root 4096 Jul 9 12:54 sites-available\r\ndrwxr-xr-x 2 root root 4096 Jul 9 12:55 sites-enabled\r\n\r\nserver /etc/nginx # nginx -t\r\nnginx: [emerg] too long parameter \"...\" started in /etc/nginx/sites-enabled/modsec-shared-collections:1\r\nnginx: configuration file /etc/nginx/nginx.conf test failed\r\nserver /etc/nginx # ls -al\r\n...\r\n-rw-r--r-- 1 root root 3957 Aug 10 2017 mime.types\r\ndrwxr-xr-x 2 root root 4096 Feb 19 12:38 modsec\r\n-rw-r--r-- 1 root root 1048576 Jul 9 12:56 modsec-shared-collections\r\n-rw-r--r-- 1 root root 8192 Jul 9 12:56 modsec-shared-collections-lock\r\n```\r\n\r\n```\r\nserver ~/test# ls -l\r\ntotal 0\r\nserver ~/test # nginx -t\r\nnginx: the configuration file /etc/nginx/nginx.conf syntax is ok\r\nnginx: configuration file /etc/nginx/nginx.conf test is successful\r\nserver ~/test # ls -l\r\ntotal 12\r\n-rw-r--r-- 1 root root 1048576 Jul 9 13:23 modsec-shared-collections\r\n-rw-r--r-- 1 root root 8192 Jul 9 13:23 modsec-shared-collections-lock\r\n```\r\n\r\nThis is very strange behavior. 
If I change directory to `/etc/nginx`, then run `nginx -t` - after that my nginx config is broken.\r\n\r\n```\r\nldd /usr/local/modsecurity/lib/libmodsecurity.so\r\n\tlinux-vdso.so.1 (0x00007ffc5f95e000)\r\n\tlibcurl.so.4 => /usr/lib/x86_64-linux-gnu/libcurl.so.4 (0x00007f339cc37000)\r\n\tlibrt.so.1 => /lib/x86_64-linux-gnu/librt.so.1 (0x00007f339ca2f000)\r\n\tlibxml2.so.2 => /usr/lib/x86_64-linux-gnu/libxml2.so.2 (0x00007f339c674000)\r\n\tliblmdb.so.0 => /usr/lib/x86_64-linux-gnu/liblmdb.so.0 (0x00007f339c45f000)\r\n\tlibpcre.so.3 => /lib/x86_64-linux-gnu/libpcre.so.3 (0x00007f339c1ed000)\r\n\tlibstdc++.so.6 => /usr/lib/x86_64-linux-gnu/libstdc++.so.6 (0x00007f339be6b000)\r\n\tlibm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007f339bb67000)\r\n\tlibc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f339b7c8000)\r\n\t/lib64/ld-linux-x86-64.so.2 (0x00007f339d2de000)\r\n\tlibgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 (0x00007f339b5b1000)\r\n\tlibnghttp2.so.14 => /usr/lib/x86_64-linux-gnu/libnghttp2.so.14 (0x00007f339b38b000)\r\n\tlibidn2.so.0 => /usr/lib/x86_64-linux-gnu/libidn2.so.0 (0x00007f339b169000)\r\n\tlibrtmp.so.1 => /usr/lib/x86_64-linux-gnu/librtmp.so.1 (0x00007f339af4c000)\r\n\tlibssh2.so.1 => /usr/lib/x86_64-linux-gnu/libssh2.so.1 (0x00007f339ad1f000)\r\n\tlibpsl.so.5 => /usr/lib/x86_64-linux-gnu/libpsl.so.5 (0x00007f339ab11000)\r\n\tlibssl.so.1.0.2 => /usr/lib/x86_64-linux-gnu/libssl.so.1.0.2 (0x00007f339a8a8000)\r\n\tlibcrypto.so.1.0.2 => /usr/lib/x86_64-linux-gnu/libcrypto.so.1.0.2 (0x00007f339a442000)\r\n\tlibgssapi_krb5.so.2 => /usr/lib/x86_64-linux-gnu/libgssapi_krb5.so.2 (0x00007f339a1f7000)\r\n\tlibkrb5.so.3 => /usr/lib/x86_64-linux-gnu/libkrb5.so.3 (0x00007f3399f1d000)\r\n\tlibk5crypto.so.3 => /usr/lib/x86_64-linux-gnu/libk5crypto.so.3 (0x00007f3399cea000)\r\n\tlibcom_err.so.2 => /lib/x86_64-linux-gnu/libcom_err.so.2 (0x00007f3399ae6000)\r\n\tliblber-2.4.so.2 => /usr/lib/x86_64-linux-gnu/liblber-2.4.so.2 (0x00007f33998d7000)\r\n\tlibldap_r-2.4.so.2 => /usr/lib/x86_64-linux-gnu/libldap_r-2.4.so.2 (0x00007f3399686000)\r\n\tlibz.so.1 => /lib/x86_64-linux-gnu/libz.so.1 (0x00007f339946c000)\r\n\tlibpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0 (0x00007f339924f000)\r\n\tlibdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f339904b000)\r\n\tlibicui18n.so.57 => /usr/lib/x86_64-linux-gnu/libicui18n.so.57 (0x00007f3398bd1000)\r\n\tlibicuuc.so.57 => /usr/lib/x86_64-linux-gnu/libicuuc.so.57 (0x00007f3398829000)\r\n\tlibicudata.so.57 => /usr/lib/x86_64-linux-gnu/libicudata.so.57 (0x00007f3396dac000)\r\n\tliblzma.so.5 => /lib/x86_64-linux-gnu/liblzma.so.5 (0x00007f3396b86000)\r\n\tlibunistring.so.0 => /usr/lib/x86_64-linux-gnu/libunistring.so.0 (0x00007f339686f000)\r\n\tlibgnutls.so.30 => /usr/lib/x86_64-linux-gnu/libgnutls.so.30 (0x00007f33964d6000)\r\n\tlibhogweed.so.4 => /usr/lib/x86_64-linux-gnu/libhogweed.so.4 (0x00007f33962a1000)\r\n\tlibnettle.so.6 => /usr/lib/x86_64-linux-gnu/libnettle.so.6 (0x00007f339606a000)\r\n\tlibgmp.so.10 => /usr/lib/x86_64-linux-gnu/libgmp.so.10 (0x00007f3395de7000)\r\n\tlibgcrypt.so.20 => /lib/x86_64-linux-gnu/libgcrypt.so.20 (0x00007f3395ad7000)\r\n\tlibkrb5support.so.0 => /usr/lib/x86_64-linux-gnu/libkrb5support.so.0 (0x00007f33958cb000)\r\n\tlibkeyutils.so.1 => /lib/x86_64-linux-gnu/libkeyutils.so.1 (0x00007f33956c7000)\r\n\tlibresolv.so.2 => /lib/x86_64-linux-gnu/libresolv.so.2 (0x00007f33954b0000)\r\n\tlibsasl2.so.2 => /usr/lib/x86_64-linux-gnu/libsasl2.so.2 (0x00007f3395295000)\r\n\tlibp11-kit.so.0 => 
/usr/lib/x86_64-linux-gnu/libp11-kit.so.0 (0x00007f3395030000)\r\n\tlibidn.so.11 => /lib/x86_64-linux-gnu/libidn.so.11 (0x00007f3394dfc000)\r\n\tlibtasn1.so.6 => /usr/lib/x86_64-linux-gnu/libtasn1.so.6 (0x00007f3394be9000)\r\n\tlibgpg-error.so.0 => /lib/x86_64-linux-gnu/libgpg-error.so.0 (0x00007f33949d5000)\r\n\tlibffi.so.6 => /usr/lib/x86_64-linux-gnu/libffi.so.6 (0x00007f33947cc000)\r\n\r\n```", + "summary": "Investigate the issue of the `nginx -t` command creating the `modsec-shared-collections` file in the current working directory. Ensure that Nginx is built with `modsecurity-nginx` and that ModSecurity is configured with LMDB support (`--with-lmdb`).\n\n1. Verify the Nginx configuration for any possible errors, particularly in `/etc/nginx/sites-enabled/modsec-shared-collections`. The error message indicates a \"too long parameter\" issue which may be due to malformed directives or excessive length in the configuration file.\n\n2. Check the permissions and ownership of the directories and files involved. Ensure that Nginx has the appropriate permissions to read and write to the required directories.\n\n3. Run `nginx -t` in the `/etc/nginx` directory and observe the output. If the Nginx configuration is deemed valid but still produces the `modsec-shared-collections` file, further investigation into the ModSecurity configuration is needed.\n\n4. Investigate the behavior when changing directories before running the `nginx -t` command. Document any discrepancies in output or behavior based on the working directory.\n\n5. Review the linking of libraries with `ldd /usr/local/modsecurity/lib/libmodsecurity.so` to ensure all dependencies are correctly resolved and available.\n\n6. As a first step, simplify the configuration files by commenting out suspicious or complex directives, then gradually reinstate them to isolate the problematic configuration line.\n\n7. Enable logging in ModSecurity to capture more detailed error messages that could provide insight into why the configuration test fails and the unexpected file creation occurs. \n\nBy following these steps, identify the root cause of the issue and remediate it to restore proper functionality of the Nginx and ModSecurity integration.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2354", @@ -60780,6 +62896,7 @@ "node_id": "MDU6SXNzdWU2NTY2NTgzNTM=", "title": "Parser for XML namespaces in xml rules is too restrictive", "body": "**Parser for XML namespeces in XML rules is too restrictive **\r\n\r\nWe encountered a bug when porting mod_security from version 2 to version 3.\r\nThe parser, that parses the XML-namespace in XML rules is too restrictive.\r\nIt allows only namespaces beginning with http://\r\n\r\nBurt the specification from https://www.w3.org/TR/1999/REC-xml-names-19990114\r\nstates:\r\n\r\n-------\r\n2. Declaring Namespaces\r\n\r\n.....\r\n\r\n[Definition:] The attribute's value, a URI reference, is the namespace name identifying the namespace. The namespace name, to serve its intended purpose, should have the characteristics of uniqueness and persistence. It is not a goal that it be directly usable for retrieval of a schema (if any exists). An example of a syntax that is designed with these goals in mind is that for Uniform Resource Names [RFC2141]. However, it should be noted that ordinary URLs can be managed in such a way as to achieve these same goals.\r\n-------\r\n\r\nThus, namespaces do not always start with http://, but may also start with e.g. 
uri: \r\n\r\nWe got the rules loaded and working by just removing the check for http:// in src/actions/xmlns.cc\r\n\r\n

\r\n--- src/actions/xmlns.cc        Thu Jul  9 13:38:37 2020\r\n+++ src/actions/xmlns.cc        Thu Jul  9 13:41:00 2020\r\n@@ -27,7 +27,6 @@\r\n \r\n bool XmlNS::init(std::string *error) {\r\n     size_t pos;\r\n-    std::string http = \"http://\";\r\n \r\n     pos = m_parser_payload.find(\"=\");\r\n     if (pos == std::string::npos) {\r\n@@ -46,12 +45,6 @@\r\n     if (m_href.at(0) == '\\'' && m_href.size() > 3) {\r\n         m_href.erase(0, 1);\r\n         m_href.pop_back();\r\n-    }\r\n-\r\n-    if (m_href.compare(0, http.length(), http) != 0) {\r\n-        error->assign(\"XMLS: Missing xmlns href for prefix: \" \\\r\n-            \"`\" + m_href + \"'.\");\r\n-        return false;\r\n     }\r\n \r\n     return true;\r\n
\r\n\r\n\r\n\r\n", + "summary": "Fix the XML namespace parser in mod_security to allow for a wider range of URIs. The current implementation restricts namespaces to only those beginning with \"http://\", which contradicts the XML namespace specification from W3C that permits other formats, such as URIs starting with \"uri:\".\n\n1. Modify the parser in `src/actions/xmlns.cc` to remove the check for \"http://\".\n2. Ensure that the parser can accept various URI formats as valid namespace identifiers.\n3. Test the updated parser with XML rules that utilize different namespace formats to verify compatibility.\n4. Review the XML namespace specification to confirm compliance with various URI formats.\n\nImplement these changes to enhance the flexibility of the XML namespace parsing in mod_security.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2358", @@ -60810,6 +62927,7 @@ "node_id": "MDU6SXNzdWU2NjYxNzU2NDU=", "title": "nginx worker hangs while logging to the audit log", "body": "Hello,\r\nIn our company we use **ModSecurity** in our nginx. We noticed that sometimes **nginx** workers just hangs and do nothing, however when we `strace` the process (or attach the `gdb`), then the worker starts spinning again. This problem happens when we enable audit logging (ex: `SecAuditLog /var/log/modsec_audit.log`), I have traced the issues to the following line https://github.com/SpiderLabs/ModSecurity/blob/0eb3c123f447b8787ea726ad4d4439018a07ee31/src/utils/shared_files.cc#L236 it appers that proccess is unable to wakeup and acquire the lock for the file and it is just waiting.\r\n\r\nbacktrace:\r\n```\r\n#1 0x00007f31debc4714 in __GI___pthread_mutex_lock (mutex=0x7f31debef008) at ../nptl/pthread_mutex_lock.c:80\r\n#2 0x00007f31de95474a in modsecurity::utils::SharedFiles::write(std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator >*) () from /usr/lib/x86_64-linux-gnu/libmodsecurity.so.3\r\n#3 0x00007f31de8dd9a4 in modsecurity::audit_log::writer::Serial::write(modsecurity::Transaction*, int, std::__cxx11::basic_string, std::allocator >*) () from /usr/lib/x86_64-linux-gnu/libmodsecurity.so.3\r\n#4 0x00007f31de8dcc0e in modsecurity::audit_log::AuditLog::saveIfRelevant(modsecurity::Transaction*, int) () from /usr/lib/x86_64-linux-gnu/libmodsecurity.so.3\r\n#5 0x00007f31de8cd966 in modsecurity::Transaction::processLogging() () from /usr/lib/x86_64-linux-gnu/libmodsecurity.so.3\r\n#6 0x0000563cedc9d11c in ngx_http_modsecurity_log_handler (r=) at ../modules/ngx_modsecurity_module/src/ngx_http_modsecurity_log.c:83\r\n#7 0x0000563cedc0b41d in ngx_http_log_request (r=0x563cf1a308e0) at src/http/ngx_http_request.c:3684\r\n#8 ngx_http_free_request (r=r@entry=0x563cf1a308e0, rc=) at src/http/ngx_http_request.c:3630\r\n#9 0x0000563cedc0b4f8 in ngx_http_close_request (rc=0, r=) at src/http/ngx_http_request.c:3576\r\n#10 0x0000563cedc1e90d in ngx_http_upstream_process_request (r=0x563cf1a308e0, u=0x563cf20a8538) at src/http/ngx_http_upstream.c:4122\r\n#11 0x0000563cedc2275b in ngx_http_upstream_send_response (u=0x563cf20a8538, r=0x563cf1a308e0) at src/http/ngx_http_upstream.c:3306\r\n#12 ngx_http_upstream_process_header (r=0x563cf1a308e0, u=0x563cf20a8538) at src/http/ngx_http_upstream.c:2479\r\n#13 0x0000563cedc1dd92 in ngx_http_upstream_handler (ev=) at src/http/ngx_http_upstream.c:1329\r\n#14 0x0000563cedbf6836 in ngx_epoll_process_events (cycle=, timer=, flags=) at 
src/event/modules/ngx_epoll_module.c:901\r\n#15 0x0000563cedbece76 in ngx_process_events_and_timers (cycle=cycle@entry=0x563cf02d4250) at src/event/ngx_event.c:260\r\n#16 0x0000563cedbf4a18 in ngx_worker_process_cycle (cycle=cycle@entry=0x563cf02d4250, data=data@entry=0x1) at src/os/unix/ngx_process_cycle.c:767\r\n#17 0x0000563cedbf3159 in ngx_spawn_process (cycle=cycle@entry=0x563cf02d4250, proc=proc@entry=0x563cedbf4990 , data=data@entry=0x1, name=name@entry=0x563ceded3328 \"worker process\", respawn=respawn@entry=-3) at src/os/unix/ngx_process.c:203\r\n#18 0x0000563cedbf5366 in ngx_start_worker_processes (cycle=cycle@entry=0x563cf02d4250, n=40, type=type@entry=-3) at src/os/unix/ngx_process_cycle.c:372\r\n#19 0x0000563cedbf581b in ngx_master_process_cycle (cycle=0x563cf02d4250) at src/os/unix/ngx_process_cycle.c:144\r\n#20 0x0000563cedbc8cad in main (argc=, argv=) at src/core/nginx.c:382\r\n```\r\n\r\nWe use the [following](https://github.com/SpiderLabs/ModSecurity-nginx) connector\r\n\r\n**server:**\r\n- nginx 1.17 - 40 workers\r\n- Debian 10 (buster)\r\n- libmodsecurity3 (3.0.3)\r\n\r\nIs this a bug or this directive cannot be used with multiple processes logging to the same file?", + "summary": "**Investigate the nginx worker hang issue related to ModSecurity audit logging.**\n\n1. **Identify the problem:** \n - Nginx workers hang when audit logging is enabled (e.g., `SecAuditLog /var/log/modsec_audit.log`).\n - Hanging occurs due to the inability to acquire a lock for the audit log file, as indicated by the stack trace.\n\n2. **Analyze the stack trace:**\n - The issue is traced to `modsecurity::utils::SharedFiles::write` where a mutex lock is attempted but fails, causing the worker to block.\n - Review the call hierarchy leading to the lock contention:\n - `modsecurity::audit_log::writer::Serial::write`\n - `modsecurity::audit_log::AuditLog::saveIfRelevant`\n - `ngx_http_modsecurity_log_handler`\n\n3. **Reproduce the issue:**\n - Set up an environment mirroring the production server with:\n - Nginx 1.17 \n - Debian 10\n - libmodsecurity3 (3.0.3)\n - Enable audit logging and simulate traffic to trigger the hang.\n\n4. **Test mutex locking behavior:**\n - Use `strace` or `gdb` to observe mutex behavior when logging is enabled.\n - Determine if the mutex is held by another process and investigate potential race conditions.\n\n5. **Evaluate configuration:**\n - Check if logging to a single file from multiple worker processes is supported by ModSecurity.\n - Consider configuring separate audit log files for each worker to prevent contention.\n\n6. **Consider potential solutions:**\n - Modify the logging configuration to use a single process for writing logs, or implement a queuing mechanism.\n - Update ModSecurity to a newer version if available, as fixes for such issues may have been released.\n - Review existing GitHub issues or discussions for similar problems and solutions.\n\n7. 
**Document findings:**\n - Record any changes made during troubleshooting.\n - Share insights with the team and provide recommendations based on your findings.\n\nBy following these steps, you can effectively address the nginx worker hang issue related to ModSecurity's audit logging.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2373", @@ -60828,8 +62946,8 @@ 977 ], "labels": [ - 399, - 411 + 411, + 399 ] } }, @@ -60842,6 +62960,7 @@ "node_id": "MDU6SXNzdWU2OTE0NjUxNjk=", "title": "`SecAuditLogStorageDir` behavior change from 2.9 to 3.x", "body": "**Describe the bug**\r\n\r\nGiven identical configuration (see the last section of the post for details) for Apache and mod sec 2 and thenginx connector and mod sec 3.0 `SecAuditLogStorageDir` behaves this way:\r\n\r\n1. Apache logs to `$SecAuditLogStorageDir//YYYYMMDD/…`\r\n2. nginx logs to `$SecAuditLogStorageDir/YYYYMMDD/…`\r\n\r\n**To Reproduce**\r\n\r\n1. configure an Apache machine like below\r\n2. configure an nginx machine like below\r\n3. do some traffic on both that would utilize `SecAuditLogStorageDir`\r\n4. compare `SecAuditLogStorageDir` on both machines and note the presence of `$SecAuditLogStorageDir/` on the apache one and the lack of `$SecAuditLogStorageDir/` on the nginx one\r\n\r\n**Expected behavior**\r\n\r\nthe same configuration should behave the same way between 2.9 and 3.0 the docs give no reason to think otherwise for the configuration involved, in this case nginx connector should use `$SecAuditLogStorageDir//YYYYMMDD/…`\r\n\r\n**Server:**\r\n - WebServer: apache v2.4.46\r\n - ModSecurity version: mod_security2 v2.9.3\r\n - WebServer: nginx v1.19.1\r\n - ModSecurity version (and connector): libmodsecurity v3.0.4 with nginx-connector v1.0.1]\r\n - OS (and distro): CentOS 7\r\n\r\n**Rule Set:**\r\n - only running owasp CRS v3.3.0\r\n\r\n**Additional context**\r\n\r\nbesides module loading config in both ➜\r\n\r\nApache config:\r\n\r\n```\r\nSecAuditLog logs/modsec_audit.log\r\nSecDebugLog logs/modsec_debug.log\r\nSecDebugLogLevel 0\r\nSecDefaultAction \"phase:2,deny,log,status:406\"\r\n\r\nSecAuditLogStorageDir logs/modsec_audit\r\nSecAuditLogType Concurrent\r\n\r\nSecDataDir \"/var/cpanel/secdatadir\"\r\nSecAuditEngine \"RelevantOnly\"\r\nSecRuleEngine \"On\"\r\n\r\nInclude … all the OWASP rule files here\r\n```\r\n\r\nnginx config:\r\n\r\n```\r\nmodsecurity on;\r\n\r\nmodsecurity_rules '\r\n SecAuditLog /var/log/nginx/modsec30_audit.log\r\n SecDebugLog /var/log/nginx/modsec30_debug.log\r\n SecDebugLogLevel 0\r\n SecDefaultAction \"phase:2,deny,log,status:406\"\r\n SecAuditLogStorageDir /var/log/nginx/modsec30_audit\r\n SecAuditLogType Concurrent\r\n SecAuditEngine RelevantOnly\r\n SecRuleEngine On\r\n';\r\n\r\nmodsecurity_rules_file … all the OWASP rule files here\r\n```", + "summary": "Investigate the behavior change of `SecAuditLogStorageDir` from ModSecurity 2.9 to 3.0 across Apache and Nginx configurations. \n\n1. Confirm that both Apache and Nginx have identical configurations for `SecAuditLogStorageDir` and related settings.\n2. 
Observe logging patterns:\n - **Apache**: Ensure logs are stored in `$SecAuditLogStorageDir//YYYYMMDD/...`.\n - **Nginx**: Note that logs are stored in `$SecAuditLogStorageDir/YYYYMMDD/...` without the `` directory.\n\n### Steps to Reproduce the Issue:\n- Set up an Apache server with the provided configuration, ensuring ModSecurity v2.9.3 is installed.\n- Set up an Nginx server with the provided configuration, ensuring ModSecurity v3.0.4 with nginx-connector v1.0.1 is in use.\n- Generate traffic on both servers that triggers the logging mechanism.\n- Compare the log storage paths to identify the absence of the `` directory in the Nginx logs.\n\n### Expected Behavior:\nThe Nginx implementation should replicate the Apache behavior, creating logs in the directory `$SecAuditLogStorageDir//YYYYMMDD/...` as documented.\n\n### First Steps to Tackle the Problem:\n1. Review the ModSecurity v3.0 documentation for any changes related to `SecAuditLogStorageDir` and user-specific logging.\n2. Check if the Nginx connector has specific configurations or limitations that differ from the Apache module.\n3. Consider filing a bug report with detailed findings to the ModSecurity GitHub repository if the behavior is inconsistent with the documentation.\n4. Collaborate with the community or maintainers to explore potential fixes or workarounds, possibly modifying configurations to achieve desired logging behavior.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2392", @@ -60872,6 +62991,7 @@ "node_id": "MDU6SXNzdWU3MDExNDIzOTY=", "title": "SecRuleScript “Example Usage” errors w/ “directive Rule id: 0 is duplicated”", "body": "**Describe the bug**\r\n\r\nhttps://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-%28v2.x%29#SecRuleScript\r\n\r\n> Example Usage: `SecRuleScript \"/path/to/file.lua\" \"block\"`\r\n\r\n`nginx -t` and `httpd -t` both error w/ `directive Rule id: 0 is duplicated`\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n\r\n0. include OWASP3 rules (v3.3.0 ATM)\r\n1. Configure `SecRuleScript` w/ an action of `\"block\"` like the doc example\r\n2. nginx -t == `directive Rule id: 0 is duplicated`\r\n3. httpd -t == `directive Rule id: 0 is duplicated`\r\n4. change `\"block\"` to \"id:12345,block\"\r\n5. nginx -t == no error\r\n6. httpd -t == no error\r\n7. go back to `\"block\"`\r\n8. remove OWASP3 rules\r\n9. nginx -t == no error\r\n10. httpd -t == no error\r\n\r\n**Expected behavior**\r\n\r\nThat the example works like it does in 2.9 w/out needing to add an `id`.\r\n\r\n**Server, Rule Set, Additional Context**\r\n\r\nSame as #2392 ", + "summary": "**Resolve SecRuleScript Directive Duplication Error**\n\n**Issue Overview**: Fix the error encountered when using `SecRuleScript` with the action `\"block\"`, which results in the message `directive Rule id: 0 is duplicated` during configuration tests with `nginx -t` and `httpd -t`.\n\n**Steps to Reproduce**:\n1. Include OWASP3 rules (version 3.3.0).\n2. Set up `SecRuleScript` with the example usage: `SecRuleScript \"/path/to/file.lua\" \"block\"`.\n3. Execute `nginx -t` and observe the error.\n4. Change the action to `\"id:12345,block\"` and test again; both `nginx` and `httpd` should work without errors.\n5. 
Revert back to `\"block\"` and remove OWASP3 rules, whereupon both tests should again pass without errors.\n\n**Expected Behavior**: The configuration should allow the use of `SecRuleScript` with the action `\"block\"` without requiring an explicit rule ID, similar to its behavior in version 2.9.\n\n**Next Steps**:\n1. Analyze the code handling `SecRuleScript` to understand how rule IDs are generated and managed.\n2. Investigate the integration of OWASP3 rules with the `SecRuleScript` directive, focusing on conflicts that might arise from duplicate IDs.\n3. Review the ModSecurity documentation for any changes in the handling of rule IDs in recent versions.\n4. Consider implementing a check that allows for `block` without an ID or ensure that the ID is properly generated when not specified.\n\n**Additional Context**: Refer to issue #2392 for related discussions and potential resolutions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2400", @@ -60903,6 +63023,7 @@ "node_id": "MDU6SXNzdWU3MDY1OTY4MzQ=", "title": "Memory leak in msc_pregcomp_ex", "body": "apr_pool_cleanup_register() is called at the very end of the function.\r\nIn case any problem occurs (e.g., regex cannot compile) we return immediately, so the cleanup is never performed.\r\n\r\napr_pool_cleanup_register() should be called right after calling pcre_compile() to cleanup memory allocated by pcre.", + "summary": "Fix the memory leak in `msc_pregcomp_ex` by adjusting the placement of `apr_pool_cleanup_register()`. Currently, this function is called only at the end of `msc_pregcomp_ex`, which means it won't execute if an error occurs (e.g., failure to compile regex). \n\nTo resolve this, move the `apr_pool_cleanup_register()` call immediately after `pcre_compile()`. This adjustment ensures that memory allocated by PCRE is cleaned up even when an error occurs.\n\n### First Steps:\n1. Locate the `msc_pregcomp_ex` function in the codebase.\n2. Identify where `pcre_compile()` is called.\n3. Move the `apr_pool_cleanup_register()` call to immediately follow `pcre_compile()`.\n4. Test the function to ensure no memory leaks occur during error scenarios.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2407", @@ -60931,6 +63052,7 @@ "node_id": "MDU6SXNzdWU3MTU3MDMwMDk=", "title": "Log entries truncated", "body": "**Describe the bug**\r\n\r\nError log entries are truncated if the error log line is long enough. This cuts off information like the rule tags, sometimes even severity etc.\r\n\r\nThis comes up in the context of CRS where tags are used extensively. However, the CRS project cannot do much to mitigate this.\r\n\r\n**Logs and dumps**\r\nHere is a selection of rules where the tags are truncated. 
Note that sometimes the tags are truncated to different lengths for the same rule.\r\n```\r\n[user@host ~]$ cat /path/to/error.log | grep -v \"event-correlation\" | egrep \"\\[tag\" | sed 's/.*\\(\\[id \\S\\+\\]\\).*\\?\\(\\[tag.*\\)/\\1 \\2/' | sed 's/ \\[hostname.*//' | egrep -v \"\\]$\" | head -n 10000 | sort -u\r\n[id \"932110\"] [tag \"OWASP_CRS/WEB_ATTACK/COMMAND_INJECTION\"]\r\n[id \"942130\"] [tag \"attack-sqli\"] [t\r\n[id \"942200\"] [tag\r\n[id \"942200\"] [tag \"OWASP_AppSensor/CIE1\"] [\r\n[id \"942200\"] [tag \"OWASP_CRS/WEB_ATTACK/SQL_INJEC\r\n[id \"942210\"] [tag\r\n[id \"942210\"] [tag \"capec/10\r\n[id \"942260\"] [tag \"WASCTC/WASC-\r\n[id \"942260\"] [tag \"WASCTC/WASC-19\"] [\r\n[id \"942340\"] [tag \"OW\r\n[id \"942340\"] [tag \"OWASP_TO\r\n[id \"942340\"] [tag \"PCI\r\n[id \"942370\"] [tag \"OWASP_AppSensor/CIE1\r\n[id \"942370\"] [tag \"OWASP_CRS/WEB_ATTACK/SQL_INJECT\r\n[id \"942370\"] [tag \"OWASP_CRS/WEB_ATTACK/SQL_INJECTION\"]\r\n[id \"942430\"] [tag \"applicat\r\n[id \"942430\"] [tag \"application-multi\"] [ta\r\n[id \"942430\"] [tag \"capec/\r\n[id \"942430\"] [tag \"paranoi\r\n[id \"942430\"] [tag \"paranoia-level/\r\n[id \"942430\"] [tag \"paranoia-level/2\"\r\n[id \"942430\"] [tag \"PCI/6.5\r\n[id \"942430\"] [tag \"PCI/6.5.\r\n[id \"942450\"] [tag \"OWASP_AppSensor/CIE\r\n```\r\n\r\nHere is an example of a complete log entry:\r\n```\r\n[2020-09-29 03:59:11.378270] [-:error] 111.111.111.111:111111 XXXXXXXXXXXXXXXXXXXXXXXXXXX [client 111.111.111.111] ModSecurity: Warning. Pattern match \"(?i:[\\\\\\\\s'\\\\\"`()]*?\\\\\\\\b([\\\\\\\\d\\\\\\\\w]+)\\\\\\\\b[\\\\\\\\s'\\\\\"`()]*?(?:<(?:=(?:[\\\\\\\\s'\\\\\"`()]*?(?!\\\\\\\\b\\\\\\\\1\\\\\\\\b)[\\\\\\\\d\\\\\\\\w]+|>[\\\\\\\\s'\\\\\"`()]*?(?:\\\\\\\\b\\\\\\\\1\\\\\\\\b))|>?[\\\\\\\\s'\\\\\"`()]*?(?!\\\\\\\\b\\\\\\\\1\\\\\\\\b)[\\\\\\\\d\\\\\\\\w]+)|(?:not\\\\\\\\s+(?:regexp|like)|is\\\\\\\\s+not|>=?|!=|\\\\\\\\^)[\\\\\\\\s'\\\\\"`()]*?(?!\\\\\\\\b\\\\\\\\1\\\\\\\\b)[\\\\\\\\d\\\\\\\\w]+|(?:(?:s ...\" at XML. [file \"/opt/modsecurity-rules/3.3.0/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\"] [line \"590\"] [id \"942130\"] [msg \"SQL Injection Attack: SQL Tautology Detected\"] [data \"Matched Data: ZZZZZZZZZZZZZZZZZZZZZZZZZ found within XML: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/3.3.0\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-sqli\"] [t [hostname \"aaaaaaaaaaaaaaaaaaaaaa\"] [uri \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\"] [unique_id \"XXXXXXXXXXXXXXXXXXXXXXXXXXX\"]\r\n```\r\n\r\n**Expected behavior**\r\n\r\nThe log entries should not be truncated.\r\n\r\nAlternatively, configuration options could be added that allow the user to configure the number of characters that are allowed before the log entry is truncated.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version: ModSecurity v2.9.3\r\n - WebServer: Apache httpd 2.4.46\r\n - OS: RHEL\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? CRS\r\n - What is the version number? 
3.3.0\r\n\r\n**Additional context**\r\n\r\nSee also: https://github.com/coreruleset/coreruleset/issues/1888.\r\n", + "summary": "**Fix log entry truncation issue**\n\nIdentify the cause of log entry truncation in ModSecurity, specifically for long error log lines that cut off crucial information like rule tags and severity. This issue predominantly affects users of the OWASP Core Rule Set (CRS), as rule tags are integral to its functionality.\n\n**Steps to tackle the problem:**\n\n1. **Review ModSecurity Logging Configuration**: Check current logging settings in ModSecurity to determine if there are default limits on log line lengths.\n\n2. **Increase Log Length Limit**: If a limit exists, modify the configuration to increase the maximum allowable characters for log entries. Look for directives related to log formatting.\n\n3. **Implement Dynamic Configuration Options**: Consider adding configuration options that let users specify the maximum length of log entries. This would provide flexibility for different environments.\n\n4. **Test Changes**: After updating configurations, perform tests to confirm that long log entries are fully captured without truncation.\n\n5. **Check Compatibility**: Ensure that changes are compatible with ModSecurity version 2.9.3 and Apache httpd 2.4.46.\n\n6. **Document Changes**: Update documentation to guide users on how to configure these settings for their environments.\n\n7. **Monitor Logs Post-Implementation**: After deploying the changes, regularly monitor the logs to ensure entries are no longer truncated, and gather user feedback for further improvements.\n\nBy following these steps, ensure that error logs maintain all necessary information, enhancing the usability of the CRS and improving overall security monitoring.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2416", @@ -60961,6 +63083,7 @@ "node_id": "MDU6SXNzdWU3MTc4NzczMjc=", "title": "Log sanitizer by help of nginx", "body": "Hello As Log sanitizer is not available in V3. We are using Modsecurity 3.0.4 with nginx 1.18. So is there any other method/modules by which we can do Log sanity. Please let me know for the same \r\n", + "summary": "Investigate alternatives for log sanitization in Nginx with ModSecurity 3.0.4, as the log sanitizer is not available in version 3. \n\n1. Research existing Nginx modules that can sanitize logs, such as `ngx_http_log_module` or third-party modules like `ngx_http_log_forensic_module`.\n2. Consider implementing a custom logging handler or a Lua script using the `ngx_http_lua_module` to filter sensitive data before it gets logged.\n3. Explore configuration options in ModSecurity to mask or anonymize logs. Check the ModSecurity documentation for directives that can help with log filtering.\n4. Set up a test environment to implement and evaluate potential solutions. Use sample logs to verify that sensitive information is adequately sanitized.\n5. 
Document the chosen solution, including configuration examples and any additional modules required.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2419", @@ -60976,8 +63099,8 @@ "repository": 1213, "assignees": [], "labels": [ - 399, - 415 + 415, + 399 ] } }, @@ -60990,6 +63113,7 @@ "node_id": "MDU6SXNzdWU3MjQ0NTcxOTY=", "title": "'SecResponseBodyAccess Off' does not prevent response body from being logged (SecRuleEngine == DetectionOnly)", "body": "Hi!\r\nSetting _SecResponseBodyAccess_ to _Off_ does not prevent response body from being logged ( _SecAuditLogParts ABIJDEFHZ_)\r\n\r\nDoes it mean that with _SecResponseBodyAccess Off_ ResponseBody is still being intercepted?\r\n\r\n**Expected behaviour** - according to https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v2.x)#SecAuditLogParts \"_E: Intermediary response body (present only if ModSecurity is configured to intercept response bodies... Intercepting response bodies requires SecResponseBodyAccess to be enabled_\"\r\n\r\n**Server:**\r\n - ModSecurity version: ModSecurity v3.0.4 \r\n - WebServer: nginx-1.18.0\r\n - OS (and distro): Ubuntu 16.04\r\n\r\n**Rule Set :**\r\nhttps://github.com/coreruleset/coreruleset/archive/v3.3.0.tar.gz\r\n\r\n**Additional context**\r\n _SecRuleEngine_ == _DetectionOnly_ and _SecAuditLogType_ == _Concurrent_ \r\nRemoving _E_ from _SecAuditLogParts_ do prevent ResponseBody from being logged.\r\nIf I set _SecRuleEngine_ to _On_ and turn on DEBUG logging - logs do confirm that SecResponseBodyAccess is not ignored (https://github.com/SpiderLabs/ModSecurity/commit/42a472adbda21e6ecc4711fd37d704cdb3d98fbb):\r\n`Response body is disabled, returning... 1`", + "summary": "Investigate the issue where 'SecResponseBodyAccess Off' does not prevent response body logging when 'SecRuleEngine' is set to 'DetectionOnly'. \n\n1. Confirm that 'SecResponseBodyAccess Off' correctly disables response body interception.\n2. Verify the expected behavior outlined in the ModSecurity documentation, specifically regarding 'SecAuditLogParts' and the logging of intermediary response bodies.\n3. Examine the ModSecurity version (v3.0.4), nginx version (1.18.0), and Ubuntu OS (16.04) to ensure compatibility.\n4. Test the configuration by setting 'SecAuditLogParts' to exclude 'E' and confirm that response body logging ceases.\n5. When 'SecRuleEngine' is set to 'On', observe DEBUG logs to validate that 'SecResponseBodyAccess' is functioning correctly.\n6. Review the GitHub commit (42a472adbda21e6ecc4711fd37d704cdb3d98fbb) for insights on recent changes that may affect this behavior.\n\nTake the following first steps:\n- Reproduce the issue in a controlled environment.\n- Adjust the configuration settings as described and monitor the logging behavior.\n- Document any discrepancies and gather relevant logs for further analysis.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2425", @@ -61020,6 +63144,7 @@ "node_id": "MDU6SXNzdWU3Mjc3MDE1OTc=", "title": "ctl:ruleRemoveTargetByTag does not support regex in ModSecurity v3.x", "body": "Issue https://github.com/SpiderLabs/ModSecurity/issues/588 was raised against ModSecurity v2.9.x. 
In those versions ctl:ruleRemoveTargetByTag does support regular expressions, but the raised issue noted that some regex metacharacters cause difficulties.\r\n\r\nThis new issue is being created to separately track the different state of affairs in v3.x.\r\n\r\nIn particular, ctl:ruleRemoveTargetByTag works correctly when specifying regular text. It does not, however, have any support at all for regex.\r\n\r\nThis appears to have been intentional in the initial implementation of the v3.x code, so any regex functionality implemented for ctl:ruleRemoveTargetByTag now may be considered an enhancement rather a bug fix.\r\n\r\nIt's unclear how widely useful regular expressions are for this particular ctl action, but this issue at least allows the matter to be considered for 3.1 (or later) implementation.\r\n", + "summary": "Implement a regex support feature for the `ctl:ruleRemoveTargetByTag` directive in ModSecurity v3.x. Currently, this directive only supports literal text and lacks any regex functionality, which was a change from v2.9.x where regex was supported.\n\n1. Review the initial implementation of `ctl:ruleRemoveTargetByTag` in v3.x to understand the design decisions that led to the exclusion of regex support.\n2. Analyze the use cases for regex in this context to assess the demand and potential impact of adding this feature.\n3. Define the regex syntax and metacharacters that should be supported to align with user expectations and existing regex standards.\n4. Develop a prototype to integrate regex functionality into the `ctl:ruleRemoveTargetByTag` directive.\n5. Test the implementation thoroughly to ensure compatibility and functionality without introducing regressions.\n6. Document the enhancement and update any relevant user guides or documentation for ModSecurity.\n\nConsider proposing this enhancement for inclusion in the 3.1 release or later versions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2432", @@ -61051,6 +63176,7 @@ "node_id": "MDU6SXNzdWU3MzYzNzk5NDY=", "title": "Collection multi vars", "body": "Hello,\r\n\r\nWhen saving a value on several variables (setvar), the value actually taken into account is only the last one.\r\n\r\nI reproduced the problem by creating a regression rule.\r\n\r\nExample:\r\n[\r\n {\r\n \"enabled\":1,\r\n \"version_min\":300000,\r\n \"title\":\"Testing multi variables :: m.set SESSION & GLOBAL (1/1)\",\r\n \"resource\":\"lua\",\r\n \"client\":{\r\n \"ip\":\"200.249.12.31\",\r\n \"port\":123\r\n },\r\n \"server\":{\r\n \"ip\":\"200.249.12.31\",\r\n \"port\":80\r\n },\r\n \"request\":{\r\n \"headers\":{\r\n \"Host\":\"localhost\",\r\n \"User-Agent\":\"My sweet little browser\",\r\n \"Accept\":\"*/*\",\r\n \"Content-Length\": \"0\"\r\n },\r\n \"uri\":\"/whee?res=1\",\r\n \"method\":\"GET\",\r\n \"body\": [ ]\r\n },\r\n \"response\":{\r\n \"headers\":{},\r\n \"body\":[\r\n \"no need.\"\r\n ]\r\n },\r\n \"expected\":{\r\n \"audit_log\":\"\",\r\n \"debug_log\":\"Target value: \\\"2\\\" \\\\(Variable: SESSION:::::test\\\\)\",\r\n \"error_log\":\"\"\r\n },\r\n \"rules\":[\r\n \"SecRuleEngine On\",\r\n\t \"SecAction \\\"id:1,pass,setvar:SESSION.test=2\\\"\",\r\n\t \"SecAction \\\"id:2,pass,setvar:GLOBAL.test=1\\\"\",\r\n\t \"SecRule SESSION.test \\\"@contains 2\\\" \\\"id:3,t:none\\\"\"\r\n ]\r\n }\r\n]\r\n\r\nResult:\r\nTest name: Testing multi variables :: m.set SESSION & GLOBAL (1/1).\r\nReason:\r\nDebug log was not matching the expected results.\r\nExpecting: Target value: \"2\" 
\\(Variable: SESSION:::::test\\)\r\nDebug log:\r\n[1604518512] [] [4] Initializing transaction\r\n[1604518512] [] [4] Transaction context created.\r\n[1604518512] [] [4] Starting phase CONNECTION. (SecRules 0)\r\n[1604518512] [] [9] This phase consists of 0 rule(s).\r\n[1604518512] [] [4] Starting phase URI. (SecRules 0 + 1/2)\r\n[1604518512] [/whee?res=1] [4] Adding request argument (GET): name \"res\", value \"1\"\r\n[1604518512] [/whee?res=1] [4] Starting phase REQUEST_HEADERS. (SecRules 1)\r\n[1604518512] [/whee?res=1] [9] This phase consists of 3 rule(s).\r\n[1604518512] [/whee?res=1] [4] (Rule: 1) Executing unconditional rule...\r\n[1604518512] [/whee?res=1] [4] Running [independent] (non-disruptive) action: setvar\r\n[1604518512] [/whee?res=1] [8] Saving variable: SESSION:test with value: 2\r\n[1604518512] [/whee?res=1] [4] Running (disruptive) action: pass.\r\n[1604518512] [/whee?res=1] [8] Running action pass\r\n[1604518512] [/whee?res=1] [4] (Rule: 2) Executing unconditional rule...\r\n[1604518512] [/whee?res=1] [4] Running [independent] (non-disruptive) action: setvar\r\n[1604518512] [/whee?res=1] [8] Saving variable: GLOBAL:test with value: 1\r\n[1604518512] [/whee?res=1] [4] Running (disruptive) action: pass.\r\n[1604518512] [/whee?res=1] [8] Running action pass\r\n[1604518512] [/whee?res=1] [4] (Rule: 3) Executing operator \"Contains\" with param \"2\" against SESSION:test.\r\n**[1604518512] [/whee?res=1] [9] Target value: \"1\" (Variable: SESSION:::::test)**\r\n[1604518512] [/whee?res=1] [4] Rule returned 0.\r\n[1604518512] [/whee?res=1] [9] Matched vars cleaned.\r\n[1604518512] [/whee?res=1] [9] Appending request body: 0 bytes. Limit set to: 0.000000\r\n[1604518512] [/whee?res=1] [4] Starting phase REQUEST_BODY. (SecRules 2)\r\n[1604518512] [/whee?res=1] [9] This phase consists of 0 rule(s).\r\n[1604518512] [/whee?res=1] [4] Starting phase RESPONSE_HEADERS. (SecRules 3)\r\n[1604518512] [/whee?res=1] [9] This phase consists of 0 rule(s).\r\n[1604518512] [/whee?res=1] [9] Appending response body: 9 bytes. Limit set to: 0.000000\r\n[1604518512] [/whee?res=1] [4] Starting phase RESPONSE_BODY. (SecRules 4)\r\n[1604518512] [/whee?res=1] [4] Response body is disabled, returning... 2\r\n[1604518512] [/whee?res=1] [4] Starting phase LOGGING. (SecRules 5)\r\n[1604518512] [/whee?res=1] [9] This phase consists of 0 rule(s).\r\n[1604518512] [/whee?res=1] [8] Checking if this request is suitable to be saved as an audit log.\r\n[1604518512] [/whee?res=1] [5] Audit log engine was not set.\r\n\r\nI looked at the collection files, the m_name is not record when saving a value in the storeOrUpdateFirst function.\r\n\r\nThe key is then common for all types (GLOBAL, IP, ...)\r\n\r\nAm I missing something?\r\n\r\nThank you", + "summary": "Fix the issue where setting multiple variables using `setvar` only saves the last value. \n\n1. Reproduce the bug with the provided JSON example, which uses regression rules to set session and global variables.\n2. Examine the `storeOrUpdateFirst` function in the collection files. Check how variable names are being saved and ensure they differentiate between types (e.g., SESSION, GLOBAL).\n3. Investigate the key management within the function. Confirm that keys are unique for each variable type to prevent overwriting.\n4. 
Implement a fix to ensure that when multiple `setvar` actions are executed, each variable retains its intended value.\n\nFirst steps:\n- Clone the repository and set up a local testing environment.\n- Create a unit test that mimics the provided example to confirm the bug exists.\n- Debug the `storeOrUpdateFirst` function to identify where the overwriting occurs.\n- Modify the function to handle multiple variable types correctly, ensuring that each type maintains its own state.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2442", @@ -61081,6 +63207,7 @@ "node_id": "MDU6SXNzdWU3NDY2OTE4NDY=", "title": "IIS 10 + modsecurity on docker problem with SecDataDir", "body": "Hi gurus\r\nIm having problems trying to configure SecDataDir param on my current setup, when the parameter is activated on modsecurity.conf I stop getting data in my audit file, and these files are created empty in my folder\r\n\r\nDefaultAppPool-global.dir\r\nDefaultAppPool-global.pag\r\nDefaultAppPool-ip.dir\r\nDefaultAppPool-ip.pag\r\n\r\nMy application runs fine but no log is generated in the audit.\r\n\r\nCan someone please help me on what Im doing wrong with this SecDataDir to fix it.\r\n\r\nHere is my setup\r\n\r\nBase docker image is mcr.microsoft.com/dotnet/framework/aspnet:4.8\r\nMy application is ASP.net\r\nModSecurity version 3, msi installer\r\nCoreRuleSet coreruleset-3.4-dev\r\n\r\n\r\nModSecurity.conf contents \r\nSecRuleEngine DetectionOnly\r\nSecRequestBodyAccess On\r\nSecStreamInBodyInspection On\r\nSecRule ARGS, \"zzz\" phase:1,log,deny,status:503,id:1\r\n\r\nSecRule REQUEST_HEADERS:Content-Type \"(?:application(?:/soap\\+|/)|text/)xml\" \\\r\n \"id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML\"\r\n\r\nSecRule REQUEST_HEADERS:Content-Type \"application/json\" \\\r\n \"id:'200001',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON\"\r\n\r\nSecRequestBodyLimit 13107200\r\nSecRequestBodyNoFilesLimit 131072\r\nSecRequestBodyLimitAction Reject\r\n\r\nSecRule REQBODY_ERROR \"!@eq 0\" \\\r\n\"id:'200002', phase:2,t:none,log,deny,status:400,msg:'Failed to parse request body.',logdata:'%{reqbody_error_msg}',severity:2\"\r\n\r\nSecRule MULTIPART_STRICT_ERROR \"!@eq 0\" \\\r\n\"id:'200003',phase:2,t:none,log,deny,status:400, \\\r\nmsg:'Multipart request body failed strict validation: \\\r\nPE %{REQBODY_PROCESSOR_ERROR}, \\\r\nBQ %{MULTIPART_BOUNDARY_QUOTED}, \\\r\nBW %{MULTIPART_BOUNDARY_WHITESPACE}, \\\r\nDB %{MULTIPART_DATA_BEFORE}, \\\r\nDA %{MULTIPART_DATA_AFTER}, \\\r\nHF %{MULTIPART_HEADER_FOLDING}, \\\r\nLF %{MULTIPART_LF_LINE}, \\\r\nSM %{MULTIPART_MISSING_SEMICOLON}, \\\r\nIQ %{MULTIPART_INVALID_QUOTING}, \\\r\nIP %{MULTIPART_INVALID_PART}, \\\r\nIH %{MULTIPART_INVALID_HEADER_FOLDING}, \\\r\nFL %{MULTIPART_FILE_LIMIT_EXCEEDED}'\"\r\n\r\nSecRule MULTIPART_UNMATCHED_BOUNDARY \"@eq 1\" \\\r\n \"id:'200004',phase:2,t:none,log,deny,msg:'Multipart parser detected a possible unmatched boundary.'\"\r\n\r\nSecPcreMatchLimit 1000\r\nSecPcreMatchLimitRecursion 1000\r\n\r\nSecRule TX:/^MSC_/ \"!@streq 0\" \\\r\n \"id:'200005',phase:2,t:none,deny,msg:'ModSecurity internal error flagged: %{MATCHED_VAR_NAME}'\"\r\n\r\nSecResponseBodyAccess On\r\n\r\nSecResponseBodyMimeType text/plain text/html text/xml\r\n\r\nSecResponseBodyLimit 524288\r\n\r\nSecResponseBodyLimitAction ProcessPartial\r\n\r\nSecTmpDir C:\\inetpub\\temp\r\n\r\nSecDataDir C:\\inetpub\\temp\r\nSecDebugLog C:\\inetpub\\logs\\debug.log\r\nSecDebugLogLevel 
3\r\nSecAuditLogType Serial\r\nSecAuditLog C:\\inetpub\\logs\\modsecurity_audit.log\r\nSecAuditEngine RelevantOnly \r\nSecAuditLogRelevantStatus \"^(?:5|4(?!04))\" \r\nSecAuditLogParts ABIJDEFHZ \r\nSecAuditLogStorageDir C:\\inetpub\\logs\r\nSecArgumentSeparator &\r\nSecCookieFormat 0\r\nSecUnicodeMapFile unicode.mapping 20127\r\nSecStatusEngine On\r\n\r\ncrs-setup.conf is default\r\n\r\nmodsecurity_iis.conf\r\nInclude modsecurity.conf\r\nInclude crs-setup.conf\r\nInclude owasp_crs\\rules\\*.conf\r\n\r\nAudit log when parameter is deactivated\r\n--23480000-H--\r\nMessage: collection_retrieve_ex: Unable to retrieve collection (name \"global\", key \"global\"). Use SecDataDir to define data directory first.\r\nMessage: collection_retrieve_ex: Unable to retrieve collection (name \"ip\", key \"10.244.0.217_14e2560bfa3cd3a5250d92225315673dde28bb9a\"). Use SecDataDir to define data directory first.\r\nApache-Handler: IIS\r\nStopwatch: 1605798105876665 2985738 (- - -)\r\nStopwatch2: 1605798105876665 2985738; combined=15609, p1=0, p2=0, p3=0, p4=15609, p5=0, sr=0, sw=0, l=0, gc=0\r\nResponse-Body-Transformed: Dechunked\r\nProducer: ModSecurity for IIS (STABLE)/2.9.3 (http://www.modsecurity.org/); OWASP_CRS/3.3.0.\r\nServer: ModSecurity Standalone\r\nEngine-Mode: \"DETECTION_ONLY\"", + "summary": "**Configure SecDataDir in ModSecurity for IIS 10 on Docker**\n\n1. **Identify the Issue**: The configuration of `SecDataDir` in your ModSecurity setup is preventing data from being logged in the audit file, resulting in empty files for `DefaultAppPool-global.dir`, `DefaultAppPool-global.pag`, `DefaultAppPool-ip.dir`, and `DefaultAppPool-ip.pag`. \n\n2. **Verify Configuration**: Check that the path defined in `SecDataDir` (`C:\\inetpub\\temp`) is correctly set and accessible within the Docker container. Ensure that the container has the necessary permissions to read from and write to this directory.\n\n3. **Examine Docker Volume**: If using Docker volumes, ensure that the directory specified in `SecDataDir` is correctly mapped to a persistent storage location. Adjust the Docker `docker-compose.yml` or run command to ensure proper volume mapping.\n\n4. **Check ModSecurity Logs**: Review the debug log at `C:\\inetpub\\logs\\debug.log` to identify any errors related to the `SecDataDir` configuration. Look for messages indicating permission issues or other errors that might indicate why data isn't being written.\n\n5. **Set Up Proper File Permissions**: Ensure that the user running the IIS application pool has write permissions to `C:\\inetpub\\temp`. You may need to run the container with elevated privileges or adjust the user settings in IIS.\n\n6. **Test Configuration**: Temporarily set `SecDebugLogLevel` to a higher value (e.g., 5) to gather more verbose output about what is happening when ModSecurity processes requests.\n\n7. **Restart the Application**: After making changes, restart the IIS application pool and the Docker container to ensure that all configuration changes take effect.\n\n8. **Simplify Rule Set**: Start with a minimal configuration and gradually reintroduce rules to identify if a specific rule is causing the issue with the audit log.\n\n9. **Validate Rule Syntax**: Look for syntax issues in the rules defined in `modsecurity.conf` that might interfere with proper logging.\n\n10. 
**Document Changes**: Keep a record of any modifications made during troubleshooting to revert back if necessary.\n\nBy following these steps, you should be able to diagnose and resolve the issues related to the `SecDataDir` configuration in your ModSecurity setup on IIS 10 running in Docker.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2456", @@ -61096,8 +63223,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 401 + 401, + 398 ] } }, @@ -61110,6 +63237,7 @@ "node_id": "MDU6SXNzdWU3NTU2MDQ5NDU=", "title": "SecRequestBodyAccess off skips the phase:2 rules", "body": "**Describe the bug**\r\n\r\nIf the `SecRequestBodyAccess` is `off`, then the engine skips all rules which has `phase:2` action.\r\n\r\n**To Reproduce**\r\n\r\n[Here](https://gist.github.com/airween/f20120d8e419cd72c03ecb2aeb6a54fe) you can find a regression test case.\r\n\r\nIn the debug log you can see this line:\r\n```\r\n[nnnnnn] [/?q=evil] [4] Request body processing is disabled\r\n```\r\n\r\n**Expected behavior**\r\n\r\nAs you can see, the request contains an XML payload (CT doesn't count...), and only one (relevant) rule with `id:1`. The rule has two variable: `REQUEST_URI` and `REQUEST_BODY`. First one available in phase 1, but the second one only in phase 2, so the rule set to phase 2. The request was constructed that both variable matches, but the test returns with 200. \r\n\r\nThe expected behavior is 403, because the config disables only the processing of **request body**, not the whole phase 2.\r\n\r\nIf you replace the `phase:2` by `phase:1` in rule 1, then the request will blocked as it expected.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): libmodsecurity3.0.4, current master [afefda](https://github.com/SpiderLabs/ModSecurity/commit/afefda53c69bb17e2ec16d24dd6fbc2c8fd7d063)\r\n\r\n**Additional context**\r\n\r\nThere are two another issues related to this wrong behavior:\r\n[#1940](https://github.com/coreruleset/coreruleset/issues/1940) - CRS issue - this helped me to describe this behavior\r\n#2273 - ModSecurity - SecRequestBodyAccess Off doesn't fully disable request body processing\r\n\r\nI checked the source, and found a very strange solution to handle the `SecRequestBodyAccess` - looks like the `appendBody()` allocates a buffer even thought the variable set it to `off`. In addition, the chosen body processor also woke up, and the payload will parsed. Checking the body access is comes only at this point, that's why the #2273 exists. I assume that users disable the body access to save CPU/memory (as reporter also wrote there), but in this isn't fulfilled.\r\n\r\nThe another strange behavior is if the `SecRequestBodyAccess` is `off`, then whole phase 2 will ignored (in line [895-911](https://github.com/SpiderLabs/ModSecurity/blob/v3/master/src/transaction.cc#L895-L911)), because the evaluation called later, in [L938](https://github.com/SpiderLabs/ModSecurity/blob/v3/master/src/transaction.cc#L938).\r\n\r\nNote: I didn't checked, but I suppose the `ctl:requestBodyAccess=off` would give same result.", + "summary": "**Fix the issue where `SecRequestBodyAccess off` skips `phase:2` rules.**\n\n1. Identify the problem: When `SecRequestBodyAccess` is set to `off`, all rules that have `phase:2` actions are skipped, which prevents proper evaluation of rules that depend on request body data.\n\n2. Reproduce the bug: Use the provided regression test case linked in the issue. 
Observe debug logs for the message: `[n] [/?q=evil] [4] Request body processing is disabled`.\n\n3. Expected behavior: Ensure that rules are evaluated correctly in `phase:2` even when `SecRequestBodyAccess` is `off`. The request should trigger a 403 response when relevant variables match, rather than a 200 response.\n\n4. Review the code: Examine the ModSecurity source, particularly the handling of `SecRequestBodyAccess` in `transaction.cc`. Focus on the sections where `appendBody()` is called and where body access is checked. Note the logic that causes the entire `phase:2` to be ignored when body processing is disabled.\n\n5. Investigate related issues: Look into issues #1940 and #2273 for additional context and potential solutions.\n\n6. First steps to tackle the problem:\n - Modify the logic in `transaction.cc` to allow for the evaluation of `phase:2` rules even when `SecRequestBodyAccess` is off.\n - Ensure that variables available in `phase:1` can still trigger relevant `phase:2` rules.\n - Test the changes with the provided regression tests to confirm that the expected behavior is achieved.\n\n7. Consider testing the alternative `ctl:requestBodyAccess=off` to see if it yields the same problematic behavior, and document findings.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2465", @@ -61140,6 +63268,7 @@ "node_id": "MDU6SXNzdWU3NTk0NDU0ODc=", "title": "sanitiseMatchedBytes only works with 1 digit parameters", "body": "sanitiseMatchedBytes:5/8 => OK\r\nsanitiseMatchedBytes:10/2 => not accepted => sanitizes the whole string\r\n\r\nIn msre_action_sanitizeMatchedBytes_init():\r\n`if (action->param != NULL && strlen(action->param) == 3)`\r\nshould be\r\n`if (action->param != NULL && strlen(action->param) >= 3)`", + "summary": "Fix the `sanitiseMatchedBytes` function to support multi-digit parameters. Currently, it only accepts single-digit parameters, as demonstrated by the behavior where `sanitiseMatchedBytes:5/8` works, but `sanitiseMatchedBytes:10/2` fails, sanitizing the entire string instead of just the specified bytes.\n\nIn the function `msre_action_sanitizeMatchedBytes_init()`, modify the condition that checks the length of the parameter. Change the existing line:\n\n```c\nif (action->param != NULL && strlen(action->param) == 3)\n```\n\nto:\n\n```c\nif (action->param != NULL && strlen(action->param) >= 3)\n```\n\nThis adjustment allows the function to correctly process parameters with a length of 3 or more characters, enabling it to handle multi-digit values.\n\nFirst steps to tackle this issue:\n1. Locate the `msre_action_sanitizeMatchedBytes_init()` function in the codebase.\n2. Implement the suggested change to the length check.\n3. Write unit tests that cover cases with both single-digit and multi-digit parameters to ensure proper functionality.\n4. Run existing test suites to confirm that no other functionality is broken by the change.\n5. Document the change to inform other developers about the update in parameter handling.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2472", @@ -61171,6 +63300,7 @@ "node_id": "MDU6SXNzdWU3NjcxNTgzNzE=", "title": "Change SecRuleRemoveByMsg behaviour to looking of substring", "body": "Hi guys. 
I have a little proposal for modsecurity that i want to discuss with you.\r\n\r\n**Background:**\r\nI am trying to move from modsecurity 2.9 to modsecurity 3.x and found out some behaviour changes of SecRuleRemove* directives.\r\nOn modsecurity 2 _SecRuleRemoveByTag_ and _SecRuleRemoveByMsg_ was an regular expressions. But for 3.x behaviour of this directives changed to checking if string exact match. Based on [this commentary](https://github.com/SpiderLabs/ModSecurity/issues/2109#issuecomment-498648854): reasons of this change is performance issues with regular expressions. This is good enough argument from my point of view so this is fine.\r\n\r\nBut i get a little problems with _SecRuleRemoveByMsg_ and this is base for my proposal for modsecurity:\r\nI using [OWASP ruleset](https://github.com/coreruleset/coreruleset). For several rules removing with SecRuleRemoveByMsg can be a problematic. In messages of this rules request or modsecurity data are used (like ${tx.real_ip} or %{TX.0}). You can check it in such rules like: [912170](https://github.com/coreruleset/coreruleset/blob/54dae9a40471eaf61c3c50b72c5eb5d20e4f17ea/rules/REQUEST-912-DOS-PROTECTION.conf#L110), [912171](https://github.com/coreruleset/coreruleset/blob/54dae9a40471eaf61c3c50b72c5eb5d20e4f17ea/rules/REQUEST-912-DOS-PROTECTION.conf#L291) or [920450.](https://github.com/coreruleset/coreruleset/blob/80a7739f9d252e5c0922df4b49bf8b694970042f/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf#L1107).\r\nSo since modsecurity expect exactly match of strings in SecRuleRemoveByMsg, I can't remove this rules with it. Or can do that only for one ip address in case of 912170 for example.\r\nIf I going to use some other rule sets ([comodo rule set](https://waf.comodo.com/) is a good case), I will face same problem but with much more number of rules.\r\n\r\n**Proposal:**\r\nI suggest SecRuleRemoveByMsg logic can be changed to looking for a substring. It seems like pretty good compromise between match of string and regular expression. This change give a little more space for actions when using SecRuleRemoveByMsg directive. And as far as i can see this don't make such a big impact to performance as regular expressions.\r\n\r\nTo do that seems like no much need to be changed in modsecurity, only condition [over there](https://github.com/SpiderLabs/ModSecurity/blob/f18595f42830f2f0ac27362a8b31120e3dfb850c/src/rules_set.cc#L166).\r\nIf you agreed that this idea is good enough i could try make PR.", + "summary": "Change SecRuleRemoveByMsg behavior to substring matching. \n\n**Background:** \nTransition from ModSecurity 2.9 to 3.x reveals changes in SecRuleRemove directives. In version 2, SecRuleRemoveByTag and SecRuleRemoveByMsg utilized regular expressions. In 3.x, these directives now require exact string matches, which poses challenges when using the OWASP ruleset due to dynamic elements in messages (e.g., `${tx.real_ip}`, `%{TX.0}`). This restricts rule removal to exact matches and complicates scenarios involving variable data.\n\n**Proposal:**\nModify SecRuleRemoveByMsg logic to support substring matching instead of exact matches. This allows greater flexibility in rule management while maintaining performance benchmarks better than full regex matching.\n\n**First Steps:**\n1. Review the current implementation of SecRuleRemoveByMsg to identify areas for modification.\n2. Analyze the performance impact of changing matching logic to substring checks.\n3. Update the condition in `rules_set.cc` to enable substring matching.\n4. 
Test changes with existing OWASP and Comodo rulesets to ensure functionality.\n5. Prepare a pull request (PR) if the proposal is validated and agreed upon.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2476", @@ -61203,6 +63333,7 @@ "node_id": "MDU6SXNzdWU3NzA4NTg0Nzg=", "title": "ModSecurity 3 - Windows Apache/Nginx Connector", "body": "Hi all,\r\nit's been a while since the last update i could find on this topic.\r\nWe are facing the need to provide ModSecurity v3 in Windows environment.\r\nCurrently using IIS with 2.9.4.\r\n\r\nWhat's the current status? \r\nIs it plausible to compile and use it with Apache or Nginx on Windows to your knowledge?\r\n\r\nThanks!", + "summary": "Investigate the feasibility of using ModSecurity v3 with Apache or Nginx on Windows. Assess the current status of the Windows connector for ModSecurity 3. Specifically, determine if it is possible to compile and run ModSecurity v3 in a Windows environment, given that the user is currently utilizing IIS with version 2.9.4.\n\nTo tackle this problem:\n\n1. Research existing documentation and community discussions on ModSecurity v3 compatibility with Windows.\n2. Check the ModSecurity GitHub repository for any updates or issues related to Windows support.\n3. Explore compilation options for building ModSecurity v3 with Apache and Nginx on Windows.\n4. Identify any dependencies or prerequisites required for setting up ModSecurity on the Windows platform.\n5. Consider reaching out to the ModSecurity community or maintainers for insights or guidance on this matter.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2480", @@ -61233,6 +63364,7 @@ "node_id": "MDU6SXNzdWU3ODA2MjExNTA=", "title": "Problems with blocking if response is using compression", "body": "**Describe the bug**\r\n\r\nWhen blocking a request, which uses output compression (for example Content-Encoding: gzip), in phase 4 (so response headers are already created by application), blocking is NOT done properly because `Content-Encoding: gzip` will already be send to client so it awaits compressed response - on the client side, browsers will show error similar to this (this one is from Firefox):\r\n`Content Encoding Error`\r\n\r\n**To Reproduce**\r\n\r\nProblem can be easily reproduced using this PHP script and blocking rule:\r\n\r\n```\r\n\r\n```\r\n```\r\nSecRule &RESPONSE_HEADERS:X-test-blocking \"!@eq 0\" \\\r\n \"id:99999999999,\\\r\n phase:4,\\\r\n deny\"\r\n```\r\n\r\n\r\n\r\n**Expected behavior**\r\n\r\nmodsecurity should remove or properly set the `Content-Encoding` header if request is blocked.\r\n", + "summary": "**Fix the blocking behavior for requests with compressed responses**\n\n**Problem:** Modify the handling of HTTP responses that utilize output compression, specifically in situations where a request is blocked in phase 4. 
Currently, when a request is blocked, the `Content-Encoding: gzip` header is still sent to the client, resulting in errors like `Content Encoding Error` in browsers.\n\n**Technical Details:**\n- The issue arises when a blocking rule is triggered after response headers, including `Content-Encoding: gzip`, are sent.\n- For example, using a PHP script that outputs compressed data along with a blocking response header can lead to this problem.\n\n**Example Reproduction Script:**\n```php\n\n```\n\n**Example ModSecurity Rule:**\n```\nSecRule &RESPONSE_HEADERS:X-test-blocking \"!@eq 0\" \\\n \"id:99999999999,\\\n phase:4,\\\n deny\"\n```\n\n**Expected Behavior:** Ensure that ModSecurity modifies or removes the `Content-Encoding` header when a request is blocked, preventing any errors on the client side.\n\n**First Steps to Tackle the Problem:**\n1. Investigate the ModSecurity source code to understand how the response headers are processed in phase 4.\n2. Implement a check to alter or remove the `Content-Encoding` header when a blocking response is initiated.\n3. Test the modified rule with the provided PHP script to ensure that blocking works correctly without causing content encoding errors.\n4. Document the changes and update any relevant configurations or rules to reflect the new behavior.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2494", @@ -61263,6 +63395,7 @@ "node_id": "MDU6SXNzdWU3OTI3NDI2ODA=", "title": "allow logging to systemd journal", "body": "`SecAuditLog` currently does not allow logging to systemd journal. Or at least that's not documented.\r\n\r\nCould you please allow logging to systemd journal?\r\n\r\nMight be simple to implement. For example any daemon written in `bash` just has to do `echo message` and that will end up in systemd journal. No very special code required in ModSecurity. Just write to stdout/stderr normally. Perhaps `SecAuditLog stdout`?\r\n\r\nAnother option might be supporting `SecAuditLog /dev/log` but `/dev/log` is not a file, it's a unix domain socket file.", + "summary": "Implement logging to systemd journal in ModSecurity. Currently, `SecAuditLog` does not support this functionality, potentially due to a lack of documentation. \n\n1. Implement a feature to log directly to systemd journal using `stdout` or `stderr`. \n2. Consider introducing a configuration option like `SecAuditLog stdout` to facilitate this. \n3. Investigate the feasibility of logging to `/dev/log`, while noting it’s a Unix domain socket and not a regular file. \n\nStart by reviewing existing logging mechanisms in ModSecurity and identifying the code paths where `SecAuditLog` is invoked. Implement a function to redirect logs to `stdout` and test for successful entries in the systemd journal. Document the new feature and update relevant configuration documentation.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2506", @@ -61294,6 +63427,7 @@ "node_id": "MDU6SXNzdWU3OTQ5NTc3NTI=", "title": "[Feature request] Offset support for SecRuleUpdateTargetById", "body": "**Describe the bug**\r\n\r\nCurrently, there's no way how to update target of chained rules. 
Would be cool to have support for offsets similar to `SecRuleUpdateActionById` directive.\r\n\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): modsecurity 2.9.3, embedded\r\n - WebServer: Apache 2.4\r\n - OS (and distro): Debian\r\n", + "summary": "Implement support for offset updates in the `SecRuleUpdateTargetById` directive to handle chained rules. Currently, this functionality is lacking, which limits the flexibility of rule targeting.\n\n### Technical Details:\n- The existing `SecRuleUpdateActionById` directive allows for the modification of rule actions using offsets but does not extend this capability to target updates.\n- A similar implementation for `SecRuleUpdateTargetById` should be designed to allow users to specify offsets when updating targets of rules, enhancing the management of chained rules.\n\n### Steps to Tackle the Problem:\n1. **Review the current implementation** of `SecRuleUpdateTargetById` to understand its structure and limitations.\n2. **Analyze the `SecRuleUpdateActionById` directive** to identify how offsets are implemented and determine if a similar approach can be applied to the target updates.\n3. **Design a proposal** for the new feature, including syntax changes and examples for users.\n4. **Implement the changes** in the ModSecurity codebase, ensuring compatibility with existing functionality.\n5. **Write tests** to validate the new feature and ensure it works as expected with both single and chained rules.\n6. **Update documentation** to reflect the new feature, providing clear examples and usage guidelines for users.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2508", @@ -61325,6 +63459,7 @@ "node_id": "MDU6SXNzdWU4MDY0MTExMzE=", "title": "Only the last match of a rule is logged", "body": "This program\r\n\r\n```c++\r\n#include \r\n#include \r\n\r\n#include \r\n#include \r\n#include \r\n\r\nint main(void) {\r\n auto modsec = std::make_unique();\r\n auto rules = std::make_unique();\r\n\r\n int res = rules->load(R\"EOM(\r\n SecRule ARGS \\\r\n \"@rx bash\" \\\r\n \"id:1,\\\r\n phase:1,\\\r\n block\"\r\n )EOM\");\r\n if (res < 0) {\r\n std::cerr << rules->getParserError() << std::endl;\r\n return 1;\r\n }\r\n\r\n modsec->setServerLogCb([](void *p, const void *msg) {\r\n auto rm = static_cast(msg);\r\n std::cerr << rm->m_match << std::endl;\r\n }, modsecurity::RuleMessageLogProperty);\r\n\r\n auto transaction = std::make_unique(modsec.get(), rules.get(), nullptr);\r\n\r\n transaction->processConnection(\"127.0.0.1\", 12345, \"127.0.0.1\", 80);\r\n transaction->processURI(\"/?arg1=bash&arg2=bash\", \"GET\", \"1.1\");\r\n transaction->processRequestHeaders();\r\n transaction->processRequestBody();\r\n\r\n return 0;\r\n}\r\n```\r\n\r\nprints out a single message \r\n```\r\nMatched \"Operator `Rx' with parameter `bash' against variable `ARGS:arg2' (Value: `bash' )`.\r\n```\r\n\r\neven though the rule matches both query arguments.\r\n\r\nThis is the place where the operator is applied to every variable.\r\n\r\nhttps://github.com/SpiderLabs/ModSecurity/blob/50fc347ed4d0852d54561e15559de07c698fbb16/src/rule_with_operator.cc#L310-L332 \r\n\r\nNote that there is `performLogging` call. 
It doesn't log anything here, because [`lastLog=false, hasMultimatch()=false`](https://github.com/SpiderLabs/ModSecurity/blob/50fc347ed4d0852d54561e15559de07c698fbb16/src/rule_with_actions.cc#L496-L541).\r\n\r\nThe actual logging only happens [at the end of `RuleWithOperator::evaluate`](https://github.com/SpiderLabs/ModSecurity/blob/50fc347ed4d0852d54561e15559de07c698fbb16/src/rule_with_operator.cc#L372), which logs only a single message. `ruleMessage->m_match` is overwritten in the loop above, so only the last matching variable is effectively logged.\r\n\r\nIs this something intented? My colleagues say that ModSecurity v2 used to log all rule matches in the same scenario.", + "summary": "Investigate logging behavior of ModSecurity rules. Confirm that only the last match of a rule is logged when multiple matches occur. \n\n1. Review the provided C++ code and the current logging mechanism within ModSecurity. Locate the relevant sections:\n - `performLogging` within `rule_with_actions.cc` to understand why `lastLog=false` and `hasMultimatch()=false`.\n - The `RuleWithOperator::evaluate` function in `rule_with_operator.cc` to analyze how matches are logged and how `ruleMessage->m_match` is overwritten.\n\n2. Compare with ModSecurity v2's behavior to determine if this logging behavior is intentional or a regression.\n\n3. Modify the logging mechanism to capture all matches. Potential steps include:\n - Introduce a data structure to store multiple matches instead of overwriting `ruleMessage->m_match`.\n - Adjust the logging logic to iterate through all matches and log each one.\n\n4. Test the revised implementation to ensure all matches are logged as expected.\n\n5. Document findings and proposed changes in the issue for community feedback and review.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2512", @@ -61355,6 +63490,7 @@ "node_id": "MDU6SXNzdWU4MDkwNzM1ODc=", "title": "bad request leads to two responses", "body": "**Bug Description**\r\n\r\nAfter specific wrong request I get second response from Apache httpd server:\r\n\r\n```\r\n$ printf \"GET / HTTP/1.1\\r\\nHost: localhost\\r\\nTransfer-Encoding: chunked\\r\\n\\r\\n\\r\\n\" | nc localhost 60080\r\nHTTP/1.1 400 Bad Request\r\nDate: Thu, 11 Feb 2021 12:48:06 GMT\r\nServer: Apache/2.4.46 (Linux/SUSE)\r\nConnection: close\r\nContent-Type: text/html; charset=iso-8859-1\r\n\r\n400\r\nIt works!\r\n$\r\n```\r\nWhere\r\n```\r\nIt works!\r\n```\r\nis content of index.html in docroot.\r\n\r\n**Logs and dumps**\r\n\r\nNot necessary.\r\n\r\n**To Reproduce**\r\n\r\nMinimal httpd config:\r\n\r\n```\r\nServerName test\r\nUser user\r\nGroup users\r\nListen 60080\r\nPidFile /tmp/apache-rex/poc/pid\r\nErrorLog /tmp/apache-rex/poc/error_log\r\nLoadModule auth_basic_module /usr/lib64/apache2-prefork/mod_auth_basic.so\r\nLoadModule dir_module /usr/lib64/apache2-prefork/mod_dir.so\r\nLoadModule authz_host_module /usr/lib64/apache2-prefork/mod_authz_host.so\r\nLoadModule authz_core_module /usr/lib64/apache2-prefork/mod_authz_core.so\r\nLoadModule unique_id_module /usr/lib64/apache2-prefork/mod_unique_id.so\r\nLoadModule security2_module /usr/lib64/apache2/mod_security2.so\r\nLogLevel trace8\r\nDocumentRoot /srv/www/htdocs\r\nDirectoryIndex index.html\r\n\r\nErrorDocument 400 \"400\\n\"\r\n\r\nSecRuleEngine DetectionOnly\r\nSecRequestBodyAccess on\r\nSecAuditLogType Serial\r\nSecAuditLog /tmp/apache-rex/poc/modsec_audit.log\r\nSecTmpDir /tmp/\r\nSecDataDir /tmp/\r\n\r\n### example 
configuration\r\n\r\n\r\n Require local\r\n\r\n```\r\n\r\n**Expected behavior**\r\n\r\n```\r\n$ printf \"GET / HTTP/1.1\\r\\nHost: localhost\\r\\nTransfer-Encoding: chunked\\r\\n\\r\\n\\r\\n\" | nc localhost 60080\r\nHTTP/1.1 400 Bad Request\r\nDate: Thu, 11 Feb 2021 12:48:06 GMT\r\nServer: Apache/2.4.46 (Linux/SUSE)\r\nConnection: close\r\nContent-Type: text/html; charset=iso-8859-1\r\n\r\n400\r\n```\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): ModSecurity v2.9.3, v2.9.2\r\n - WebServer: Apache httpd 2.4.46\r\n - OS (and distro): openSUSE Tumbleweed\r\n \r\n Tested also with Ubuntu 20.04/http 2.4.41/mod_security 2.9.3 and fedora33/httpd 2.4.46/mod_security 2.9.3.\r\n\r\n**Patch**\r\n\r\nYann Ylavic (@ylavic) wrote following patch:\r\n\r\n```\r\ndiff -ru modsecurity-2.9.3.old/apache2/apache2_io.c modsecurity-2.9.3.new/apache2/apache2_io.c\r\n--- modsecurity-2.9.3.old/apache2/apache2_io.c 2018-12-04 19:49:37.000000000 +0100\r\n+++ modsecurity-2.9.3.new/apache2/apache2_io.c 2021-02-12 13:28:27.739749566 +0100\r\n@@ -209,6 +209,10 @@\r\n * too large and APR_EGENERAL when the client disconnects.\r\n */\r\n switch(rc) {\r\n+ case AP_FILTER_ERROR :\r\n+ *error_msg = apr_pstrdup(msr->mp, \"Error reading request body: filter error\");\r\n+ return -8;\r\n+\r\n case APR_INCOMPLETE :\r\n *error_msg = apr_psprintf(msr->mp, \"Error reading request body: %s\", get_apr_error(msr->mp, rc));\r\n return -7;\r\n@@ -218,7 +222,7 @@\r\n case APR_TIMEUP :\r\n *error_msg = apr_psprintf(msr->mp, \"Error reading request body: %s\", get_apr_error(msr->mp, rc));\r\n return -4;\r\n- case AP_FILTER_ERROR :\r\n+ case APR_ENOSPC:\r\n *error_msg = apr_psprintf(msr->mp, \"Error reading request body: HTTP Error 413 - Request entity too large. (Most likely.)\");\r\n return -3;\r\n case APR_EGENERAL :\r\ndiff -ru modsecurity-2.9.3.old/apache2/mod_security2.c modsecurity-2.9.3.new/apache2/mod_security2.c\r\n--- modsecurity-2.9.3.old/apache2/mod_security2.c 2018-12-04 19:49:37.000000000 +0100\r\n+++ modsecurity-2.9.3.new/apache2/mod_security2.c 2021-02-12 13:34:22.940428406 +0100\r\n@@ -1013,7 +1013,7 @@\r\n }\r\n\r\n rc = read_request_body(msr, &my_error_msg);\r\n- if (rc < 0 && msr->txcfg->is_enabled == MODSEC_ENABLED) {\r\n+ if (rc < 0) {\r\n switch(rc) {\r\n case -1 :\r\n if (my_error_msg != NULL) {\r\n@@ -1021,6 +1021,21 @@\r\n }\r\n return HTTP_INTERNAL_SERVER_ERROR;\r\n break;\r\n+ case -2 : /* Bad request. */\r\n+ case -6 : /* EOF when reading request body. */\r\n+ case -7 : /* Partial recieved */\r\n+ if (my_error_msg != NULL) {\r\n+ msr_log(msr, 4, \"%s\", my_error_msg);\r\n+ }\r\n+ r->connection->keepalive = AP_CONN_CLOSE;\r\n+ return HTTP_BAD_REQUEST;\r\n+ break;\r\n+ case -3 : /* Apache's LimitRequestBody. */\r\n+ if (my_error_msg != NULL) {\r\n+ msr_log(msr, 1, \"%s\", my_error_msg);\r\n+ }\r\n+ return HTTP_REQUEST_ENTITY_TOO_LARGE;\r\n+ break;\r\n case -4 : /* Timeout. */\r\n if (my_error_msg != NULL) {\r\n msr_log(msr, 4, \"%s\", my_error_msg);\r\n@@ -1042,19 +1057,11 @@\r\n }\r\n }\r\n break;\r\n- case -6 : /* EOF when reading request body. */\r\n- if (my_error_msg != NULL) {\r\n- msr_log(msr, 4, \"%s\", my_error_msg);\r\n- }\r\n- r->connection->keepalive = AP_CONN_CLOSE;\r\n- return HTTP_BAD_REQUEST;\r\n- break;\r\n- case -7 : /* Partial recieved */\r\n+ case -8 : /* Filter error. 
*/\r\n if (my_error_msg != NULL) {\r\n- msr_log(msr, 4, \"%s\", my_error_msg);\r\n+ msr_log(msr, 1, \"%s\", my_error_msg);\r\n }\r\n- r->connection->keepalive = AP_CONN_CLOSE;\r\n- return HTTP_BAD_REQUEST;\r\n+ return AP_FILTER_ERROR;\r\n break;\r\n default :\r\n /* allow through */\r\n```\r\n\r\nI can send a PR, but I am not the author.", + "summary": "Investigate the issue of receiving two responses from the Apache HTTP server after a bad request. The expected behavior is to receive a single `HTTP/1.1 400 Bad Request` response, yet an additional response containing the content of `index.html` is returned.\n\n1. **Reproduce the Issue**: Use the following command to send a malformed HTTP request to the server:\n ```bash\n printf \"GET / HTTP/1.1\\r\\nHost: localhost\\r\\nTransfer-Encoding: chunked\\r\\n\\r\\n\\r\\n\" | nc localhost 60080\n ```\n\n2. **Review Configuration**: Examine the provided minimal `httpd` configuration. Pay attention to the `ErrorDocument` directive and ensure that it is not causing the additional response.\n\n3. **Analyze ModSecurity Setup**: Confirm that ModSecurity version 2.9.3 is correctly configured. The current implementation may not be handling errors appropriately, leading to multiple responses.\n\n4. **Implement the Suggested Patch**: Apply the provided patch by Yann Ylavic to the `apache2_io.c` and `mod_security2.c` files. This patch modifies the error handling logic, which should help in returning the correct bad request response without additional output:\n - Handle `AP_FILTER_ERROR` and other error cases properly in the request body reading logic.\n\n5. **Test the Changes**: After applying the patch, restart the Apache server and re-run the malformed request to verify that only the expected `HTTP/1.1 400 Bad Request` response is returned.\n\n6. **Prepare for Pull Request**: If the patch resolves the issue, consider submitting a pull request (PR) with the changes, ensuring to credit the original author of the patch.\n\nBy following these steps, you should be able to resolve the issue of multiple responses and improve the handling of bad requests in the Apache HTTP server with ModSecurity.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2514", @@ -61370,9 +63506,9 @@ "repository": 1213, "assignees": [], "labels": [ - 398, 400, - 403 + 403, + 398 ] } }, @@ -61385,6 +63521,7 @@ "node_id": "MDU6SXNzdWU4MjE3OTMxNDA=", "title": "Can I use m.getvar(\"RULE.id\") to get the current rule id in 3.x version?", "body": "Can I use `m.getvar(\"RULE.id\")` to get the current rule id in the 3.x version? It is normal for me to use `m.getvar(\"REMOTE_ADDR\")`. How do I get the rule in the 3.x version? id, thanks", + "summary": "Check if `m.getvar(\"RULE.id\")` is supported in version 3.x. Investigate the documentation for version 3.x to confirm the availability of `m.getvar()` for retrieving the current rule ID. \n\nIf it's not directly available, consider these steps:\n\n1. Look into alternative methods for accessing rule variables in version 3.x.\n2. Explore the possibility of using another variable or function that provides rule context.\n3. Review recent updates or changes in the 3.x version changelog that might affect variable retrieval.\n4. 
Test any suggested methods in a development environment to validate functionality.\n\nDocument findings and share results with the community for further assistance or clarification.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2527", @@ -61402,8 +63539,8 @@ 939 ], "labels": [ - 399, - 409 + 409, + 399 ] } }, @@ -61416,6 +63553,7 @@ "node_id": "MDU6SXNzdWU4NzQ5ODkzOTI=", "title": "Are there any plans to create the Reference Manual for 3.x?", "body": "I know a lot changed between 2.x and 3.x and seeing as how 3.x was released in 2017 documentation on it would be helpful.", + "summary": "Create a Reference Manual for 3.x version. Acknowledge significant changes between 2.x and 3.x that occurred since the release in 2017. Focus on documenting new features, functionalities, and any deprecated elements from the previous version. \n\nBegin by gathering all release notes from 3.x and earlier versions to identify key changes. Next, compile existing documentation and user feedback to determine gaps. Organize documentation structure, including sections for installation, configuration, and usage examples specific to 3.x. Engage with the community for contributions and suggestions to ensure comprehensive coverage of the new version.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2563", @@ -61446,6 +63584,7 @@ "node_id": "MDU6SXNzdWU4NzYzMDI5NDI=", "title": "XML: there is no way to obtain actual error message from libxml2 on parsing failures", "body": "**Describe the bug**\r\n\r\nIt's impossible to get the actual error message produced by libxml2 when there's an issue with passed XML data.\r\n\r\nWhile in some cases it's easy to get the required data by e.g. running `xmllint` against the payload (if it's known), in other cases it can be quite hard to understand what's went wrong (see e.g. https://github.com/SpiderLabs/ModSecurity-nginx/issues/246).\r\n\r\n**To Reproduce**\r\n\r\nLet's consider inherently incorrect XML like this:\r\n\r\n```\r\n\r\n\r\n\r\n \r\n \r\n Garmin International\r\n \r\n \r\n \r\n \r\n Example GPX Document\r\n \r\n \r\n 4.46\r\n \r\n \r\n \r\n 4.94\r\n \r\n \r\n \r\n 6.87\r\n \r\n \r\n \r\n \r\n\r\n```\r\n\r\nHere's the output of `xmllint`:\r\n\r\n```\r\nvagrant@vagrant:~$ xmllint -noout test.xml\r\n111:27: parser error : Opening and ending tag mismatch: gpx line 3 and gggpx\r\n\r\n ^\r\n```\r\n\r\nNow we push it to the nginx with modsecurity enabled:\r\n\r\n```\r\nvagrant@vagrant:~$ curl -v -d @test.xml -H \"Content-Type: text/xml\" http://localhost/modsec-full/upload/\r\n* Trying 127.0.0.1...\r\n* TCP_NODELAY set\r\n* Connected to localhost (127.0.0.1) port 80 (#0)\r\n> POST /modsec-full/upload/ HTTP/1.1\r\n> Host: localhost\r\n> User-Agent: curl/7.58.0\r\n> Accept: */*\r\n> Content-Type: text/xml\r\n> Content-Length: 1252\r\n> Expect: 100-continue\r\n> \r\n< HTTP/1.1 100 Continue\r\n* We are completely uploaded and fine\r\n< HTTP/1.1 400 Bad Request\r\n< Server: nginx/1.19.10\r\n< Date: Wed, 05 May 2021 10:13:35 GMT\r\n< Content-Type: text/html\r\n< Content-Length: 158\r\n< Connection: close\r\n< \r\n\r\n400 Bad Request\r\n\r\n

400 Bad Request\r\nnginx/1.19.10
Limit set to: 524288.000000\r\n[1620209639] [/modsec-full/upload/] [9] Appending response body: 158 bytes. Limit set to: 524288.000000\r\n[1620209639] [/modsec-full/upload/] [4] Starting phase RESPONSE_BODY. (SecRules 4)\r\n[1620209639] [/modsec-full/upload/] [9] This phase consists of 0 rule(s).\r\n[1620209639] [/modsec-full/upload/] [4] Starting phase LOGGING. (SecRules 5)\r\n[1620209639] [/modsec-full/upload/] [9] This phase consists of 0 rule(s).\r\n[1620209639] [/modsec-full/upload/] [8] Checking if this request is suitable to be saved as an audit log.\r\n[1620209639] [/modsec-full/upload/] [8] Checking if this request is relevant to be part of the audit logs.\r\n[1620209639] [/modsec-full/upload/] [5] Saving this request as part of the audit logs.\r\n[1620209639] [/modsec-full/upload/] [8] Request was relevant to be saved. Parts: 6006\r\n```\r\n\r\n 2. AuditLogs\r\n\r\n```\r\n---sWcjvjKz---A--\r\n[05/May/2021:10:13:59 +0000] 1620209639 127.0.0.1 10534 127.0.0.1 80\r\n---sWcjvjKz---B--\r\nPOST /modsec-full/upload/ HTTP/1.1\r\nHost: localhost\r\nUser-Agent: curl/7.58.0\r\nAccept: */*\r\nContent-Type: text/xml\r\nContent-Length: 1252\r\nExpect: 100-continue\r\n\r\n---sWcjvjKz---D--\r\n\r\n---sWcjvjKz---E--\r\n\\x0d\\x0a400 Bad Request\\x0d\\x0a\\x0d\\x0a

400 Bad Request\\x0d\\x0anginx/1.19.10
\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\r\n\r\n---sWcjvjKz---F--\r\nHTTP/1.1 400\r\nServer: nginx/1.19.10\r\nDate: Wed, 05 May 2021 10:13:59 GMT\r\nContent-Length: 158\r\nContent-Type: text/html\r\nConnection: close\r\n\r\n---sWcjvjKz---H--\r\nModSecurity: Access denied with code 400 (phase 2). Matched \"Operator `Eq' with parameter `0' against variable `REQBODY_ERROR' (Value: `1' ) [file \"/etc/nginx/modsec/modsecurity.conf\"] [line \"10\"] [id \"200002\"] [rev \"\"] [msg \"Failed to parse request body.\"] [data \"XML parsing error: XML: Failed parsing document.\"] [severity \"2\"] [ver \"\"] [maturity \"0\"] [accuracy \"0\"] [hostname \"127.0.0.1\"] [uri \"/modsec-full/upload/\"] [unique_id \"1620209639\"] [ref \"v152,1\"]\r\n\r\n---sWcjvjKz---I--\r\n\r\n---sWcjvjKz---J--\r\n\r\n---sWcjvjKz---Z--\r\n```\r\n\r\n 3. Error logs\r\n\r\n```\r\n2021/05/05 10:13:59 [error] 20269#20269: *3 [client 127.0.0.1] ModSecurity: Access denied with code 400 (phase 2). Matched \"Operator `Eq' with parameter `0' against variable `REQBODY_ERROR' (Value: `1' ) [file \"/etc/nginx/modsec/modsecurity.conf\"] [line \"10\"] [id \"200002\"] [rev \"\"] [msg \"Failed to parse request body.\"] [data \"XML parsing error: XML: Failed parsing document.\"] [severity \"2\"] [ver \"\"] [maturity \"0\"] [accuracy \"0\"] [hostname \"127.0.0.1\"] [uri \"/modsec-full/upload/\"] [unique_id \"1620209639\"] [ref \"v152,1\"], client: 127.0.0.1, server: , request: \"POST /modsec-full/upload/ HTTP/1.1\", host: \"localhost\"\r\n```\r\n\r\n**Expected behavior**\r\n\r\nClear and unambiguous message (`parser error : Opening and ending tag mismatch [..]`) as emitted by `xmllint` instead of cryptic \"Failed to parse request body\" or \"XML: failed parsing document\".\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): v3/master (v3.0.4-119-g1376882f), ModSecurity-nginx (v1.0.1-23-g21bc821)\r\n - WebServer: nginx/1.19.10\r\n\r\n**Rule Set (please complete the following information):**\r\n\r\nNo actual ruleset involved, just plain modsecurity.conf from the v3/master.\r\n\r\n**Additional context**\r\n\r\nWith the following dummy patch I was able to get the actual error message to a file:\r\n\r\n```\r\ndiff --git a/src/request_body_processor/xml.cc b/src/request_body_processor/xml.cc\r\nindex 6d8a5ec1..6df70e86 100644\r\n--- a/src/request_body_processor/xml.cc\r\n+++ b/src/request_body_processor/xml.cc\r\n@@ -106,7 +106,10 @@ bool XML::processChunk(const char *buf, unsigned int size,\r\n }\r\n \r\n /* Not a first invocation. */\r\n+ ms_dbg_a(m_transaction, 4, \"XML: calling xmlParseChunk for buf size \" + std::to_string(size));\r\n xmlParseChunk(m_data.parsing_ctx, buf, size, 0);\r\n+\r\n+\r\n if (m_data.parsing_ctx->wellFormed != 1) {\r\n error->assign(\"XML: Failed to create parsing context.\");\r\n ms_dbg_a(m_transaction, 4, \"XML: Failed parsing document.\");\r\ndiff --git a/src/request_body_processor/xml.h b/src/request_body_processor/xml.h\r\nindex 1d29f62f..3540e8a4 100644\r\n--- a/src/request_body_processor/xml.h\r\n+++ b/src/request_body_processor/xml.h\r\n@@ -20,6 +20,7 @@\r\n \r\n #include \r\n #include \r\n+#include \r\n \r\n #include \"modsecurity/transaction.h\"\r\n #include \"modsecurity/rules_set.h\"\r\n@@ -54,6 +55,16 @@ class XML {\r\n xmlCharEncoding enc);\r\n \r\n static void null_error(void *ctx, const char *msg, ...) 
{\r\n+\t char buf[512];\r\n+\t std::ofstream f;\r\n+\t va_list args;\r\n+\t va_start (args, msg);\r\n+\t vsprintf(buf, msg, args);\r\n+\t va_end (args);\r\n+\r\n+\t f.open(\"/tmp/xml.err\", std::ios_base::app);\r\n+\t f << buf;\r\n+\t f.close();\r\n }\r\n \r\n \r\n```\r\n\r\nFile contents:\r\n\r\n```\r\nbody.xml:1: parser error : Opening and ending tag mismatch: gpx line 0 and gggpx\r\n \r\n ^\r\n```\r\n\r\n(It's just for a demo, of course I'm sure there must be a better way of handling this so the error(s) would go to appropriate context / loggers.)", + "summary": "Implement a solution to capture and report actual error messages from libxml2 during XML parsing failures in ModSecurity. Currently, the error output is vague, simply stating \"Failed to parse request body\" or \"XML: failed parsing document,\" which does not provide useful debugging information.\n\n### Steps to Address the Issue:\n\n1. **Modify XML Error Handling:**\n Modify the XML processor in ModSecurity to catch errors thrown by libxml2 and log detailed error messages. You can enhance the `null_error` function to log messages to a specific file or the system logger instead of discarding them.\n\n Example code snippet:\n ```cpp\n static void null_error(void *ctx, const char *msg, ...) {\n char buf[512];\n std::ofstream f;\n va_list args;\n va_start (args, msg);\n vsprintf(buf, msg, args);\n va_end (args);\n f.open(\"/tmp/xml.err\", std::ios_base::app);\n f << buf;\n f.close();\n }\n ```\n\n2. **Integrate Detailed Error Reporting:**\n Ensure that the error message generated by libxml2 is captured and mapped to the ModSecurity error handling. This involves updating the parsing logic to extract the specific error message from libxml2's context.\n\n3. **Logging Improvements:**\n Enhance the log level and format to include error messages so that they can be easily analyzed. Consider using structured logging for better readability and filtering.\n\n4. **Testing:**\n Test the changes using known invalid XML data to confirm that detailed, actionable error messages are logged appropriately. You can use the provided XML example as a test case.\n\n5. **Documentation:**\n Update the documentation to reflect the changes made and provide guidance on interpreting the new error messages.\n\n6. **Review and Refactor:**\n Once initial changes are made, conduct a code review to ensure adherence to coding standards and optimize the error handling workflow.\n\nBy following these steps, you will improve the clarity of XML parsing errors in ModSecurity, making it easier for developers to diagnose issues with XML data submissions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2565", @@ -61475,6 +63614,7 @@ "node_id": "MDU6SXNzdWU5NDA2MTQ1NjI=", "title": "Compile Modsecurity v3.x for WASM architecture ", "body": "Using emscripten (`emconfigure` and `emmake`) I am working on compiling the library libmodsecurity for WebAssembly.\r\n[Referring to this issue](https://github.com/emscripten-core/emscripten/issues/14221) and [this solution](https://github.com/emscripten-core/emscripten/issues/14221#issuecomment-844733534), the main problem that I'm facing is that I am including paths to system headers (`-I/usr/include` `-I/usr/include/libxml2`). 
Could anyone help me to figure out how to modify the MakeFile to avoid these inclusions?\r\nThe command that I should change is: \r\n```\r\n/home/user/Tesi/extensions/manualBuild/emsdk/upstream/bin/clang++ -target wasm32-unknown-emscripten -DEMSCRIPTEN -fignore-exceptions -mllvm -combiner-global-alias-analysis=false -mllvm -enable-emscripten-sjlj -mllvm -disable-lsr -D__EMSCRIPTEN_major__=2 -D__EMSCRIPTEN_minor__=0 -D__EMSCRIPTEN_tiny__=25 -D_LIBCPP_ABI_VERSION=2 -Dunix -D__unix -D__unix__ -Werror=implicit-function-declaration -Xclang -iwithsysroot/include/SDL --sysroot=/home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/cache/sysroot -Xclang -iwithsysroot/include/compat -DHAVE_CONFIG_H -I. -std=c++11 -I.. -g -I../others -fPIC -O3 -I../headers -DWITH_CURL_SSLVERSION_TLSv1_2 -DWITH_CURL -DWITH_GEOIP -I/usr/include -I/usr/include/libxml2 -DWITH_LIBXML2 -g -O2 -MT parser/libmodsecurity_la-seclang-parser.lo -MD -MP -MF parser/.deps/libmodsecurity_la-seclang-parser.Tpo -c -fPIC -DPIC parser/seclang-parser.cc -o parser/.libs/libmodsecurity_la-seclang-parser.o\r\n```\r\nAction performed are just:\r\n`./build.sh`\r\n`emconfigure ./configure`\r\n`emmake make`\r\nResulting in this error:\r\n```\r\n/bin/bash ../libtool --tag=CXX --mode=compile /home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/em++ -DHAVE_CONFIG_H -I. -std=c++11 -I.. -g -I../others -fPIC -O3 -I../headers -DWITH_CURL_SSLVERSION_TLSv1_2 -DWITH_CURL -DWITH_GEOIP -I/usr/include -I/usr/include/libxml2 -DWITH_LIBXML2 -g -O2 -MT parser/libmodsecurity_la-seclang-parser.lo -MD -MP -MF parser/.deps/libmodsecurity_la-seclang-parser.Tpo -c -o parser/libmodsecurity_la-seclang-parser.lo `test -f 'parser/seclang-parser.cc' || echo './'`parser/seclang-parser.cc\r\nlibtool: compile: /home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/em++ -DHAVE_CONFIG_H -I. -std=c++11 -I.. 
-g -I../others -fPIC -O3 -I../headers -DWITH_CURL_SSLVERSION_TLSv1_2 -DWITH_CURL -DWITH_GEOIP -I/usr/include -I/usr/include/libxml2 -DWITH_LIBXML2 -g -O2 -MT parser/libmodsecurity_la-seclang-parser.lo -MD -MP -MF parser/.deps/libmodsecurity_la-seclang-parser.Tpo -c parser/seclang-parser.cc -fPIC -DPIC -o parser/.libs/libmodsecurity_la-seclang-parser.o\r\nIn file included from parser/seclang-parser.cc:41:\r\nIn file included from seclang-parser.yy:11:\r\nIn file included from /home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/cache/sysroot/include/c++/v1/string:511:\r\nIn file included from /home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/cache/sysroot/include/c++/v1/string_view:179:\r\nIn file included from /home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/cache/sysroot/include/c++/v1/__string:57:\r\nIn file included from /home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/cache/sysroot/include/c++/v1/algorithm:651:\r\nIn file included from /home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/cache/sysroot/include/c++/v1/cstring:60:\r\n/usr/include/string.h:26:10: fatal error: 'bits/libc-header-start.h' file not found\r\n#include \r\n ^~~~~~~~~~~~~~~~~~~~~~~~~~\r\n1 error generated.\r\nem++: error: '/home/user/Tesi/extensions/manualBuild/emsdk/upstream/bin/clang++ -target wasm32-unknown-emscripten -DEMSCRIPTEN -fignore-exceptions -mllvm -combiner-global-alias-analysis=false -mllvm -enable-emscripten-sjlj -mllvm -disable-lsr -D__EMSCRIPTEN_major__=2 -D__EMSCRIPTEN_minor__=0 -D__EMSCRIPTEN_tiny__=25 -D_LIBCPP_ABI_VERSION=2 -Dunix -D__unix -D__unix__ -Werror=implicit-function-declaration -Xclang -iwithsysroot/include/SDL --sysroot=/home/user/Tesi/extensions/manualBuild/emsdk/upstream/emscripten/cache/sysroot -Xclang -iwithsysroot/include/compat -DHAVE_CONFIG_H -I. -std=c++11 -I.. -g -I../others -fPIC -O3 -I../headers -DWITH_CURL_SSLVERSION_TLSv1_2 -DWITH_CURL -DWITH_GEOIP -I/usr/include -I/usr/include/libxml2 -DWITH_LIBXML2 -g -O2 -MT parser/libmodsecurity_la-seclang-parser.lo -MD -MP -MF parser/.deps/libmodsecurity_la-seclang-parser.Tpo -c -fPIC -DPIC parser/seclang-parser.cc -o parser/.libs/libmodsecurity_la-seclang-parser.o' failed (returned 1)\r\nmake[3]: *** [Makefile:2072: parser/libmodsecurity_la-seclang-parser.lo] Error 1\r\nmake[3]: Leaving directory '/home/user/Tesi/extensions/manualBuild/ModSecurity/src'\r\nmake[2]: *** [Makefile:3479: all-recursive] Error 1\r\nmake[2]: Leaving directory '/home/user/Tesi/extensions/manualBuild/ModSecurity/src'\r\nmake[1]: *** [Makefile:1224: all] Error 2\r\nmake[1]: Leaving directory '/home/user/Tesi/extensions/manualBuild/ModSecurity/src'\r\nmake: *** [Makefile:1035: all-recursive] Error 1\r\nemmake: error: 'make' failed (returned 2)\r\n```\r\nThanks for any help about the generation and the possibility to change this building phase", + "summary": "Compile ModSecurity v3.x for the WebAssembly (WASM) architecture using Emscripten. Focus on modifying the Makefile to eliminate the inclusion paths to system headers (`-I/usr/include` and `-I/usr/include/libxml2`) that are causing build errors.\n\n### Steps to Address the Issue:\n\n1. **Modify the Makefile**:\n - Locate the Makefile used during the build process.\n - Identify the compiler flags section where `CXXFLAGS` or similar variables are defined.\n - Remove or comment out the lines that include the system header paths. For example:\n ```makefile\n # -I/usr/include\n # -I/usr/include/libxml2\n ```\n\n2. 
**Utilize Emscripten-specific Flags**:\n - Ensure that the correct Emscripten flags are set for compiling C++ code. You may need to add `-s USE_LIBXML2=1` or similar flags if you are using any libraries that require configuration for WASM.\n\n3. **Reconfigure the Build**:\n - Run the following commands to reconfigure and build:\n ```bash\n ./build.sh\n emconfigure ./configure\n emmake make\n ```\n\n4. **Check for Additional Dependencies**:\n - If the library requires certain dependencies, ensure they are either compiled for WASM or removed from the dependency list if unnecessary.\n\n5. **Handle Missing Header Issues**:\n - If the build continues to fail due to missing headers, verify that the necessary headers are available in the Emscripten sysroot. Adjust the include paths accordingly, using Emscripten's sysroot paths instead of system paths.\n\n6. **Testing Compilation**:\n - After making the changes, test the compilation again to ensure that the errors regarding missing system headers are resolved.\n\n### Expected Outcome:\nBy following these steps, the build process should successfully compile ModSecurity for WASM without encountering issues related to system header inclusions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2590", @@ -61505,6 +63645,7 @@ "node_id": "MDU6SXNzdWU5ODU1MzgzMDA=", "title": "build broken on Debian bullseye - ../libtool: line 1733: 84059 Aborted (core dumped) ar cr .libs/libmodsecurity.a", "body": "```\r\ngit clone --depth 1 -b v3/master --single-branch https://github.com/SpiderLabs/ModSecurity\r\n```\r\n```\r\ncd ModSecurity\r\n```\r\n```\r\n~/ModSecurity$ ./build.sh\r\nlibtoolize: putting auxiliary files in '.'.\r\nlibtoolize: copying file './ltmain.sh'\r\nlibtoolize: putting macros in AC_CONFIG_MACRO_DIRS, 'build'.\r\nlibtoolize: copying file 'build/libtool.m4'\r\nlibtoolize: copying file 'build/ltoptions.m4'\r\nlibtoolize: copying file 'build/ltsugar.m4'\r\nlibtoolize: copying file 'build/ltversion.m4'\r\nlibtoolize: copying file 'build/lt~obsolete.m4'\r\nfatal: No names found, cannot describe anything.\r\nfatal: No names found, cannot describe anything.\r\nfatal: No names found, cannot describe anything.\r\nconfigure.ac:50: installing './compile'\r\nconfigure.ac:45: installing './missing'\r\nexamples/multiprocess_c/Makefile.am: installing './depcomp'\r\nfatal: No names found, cannot describe anything.\r\n```\r\n\r\nIs `fatal: No names found, cannot describe anything.` an issue? https://github.com/SpiderLabs/ModSecurity/issues/1630#issuecomment-352416967 says `./build.sh` can be skipped.\r\n\r\n```\r\nuser@Debian-105-buster-64-minimal:~/ModSecurity$ ./configure\r\nchecking for a BSD-compatible install... /usr/bin/install -c\r\nchecking whether build environment is sane... yes\r\nchecking for a thread-safe mkdir -p... /usr/bin/mkdir -p\r\nchecking for gawk... gawk\r\nchecking whether make sets $(MAKE)... yes\r\nchecking whether make supports nested variables... yes\r\nchecking for g++... g++\r\nchecking whether the C++ compiler works... yes\r\nchecking for C++ compiler default output file name... a.out\r\nchecking for suffix of executables... \r\nchecking whether we are cross compiling... no\r\nchecking for suffix of object files... o\r\nchecking whether we are using the GNU C++ compiler... yes\r\nchecking whether g++ accepts -g... yes\r\nchecking whether make supports the include directive... yes (GNU style)\r\nchecking dependency style of g++... gcc3\r\nchecking for gcc... 
gcc\r\nchecking whether we are using the GNU C compiler... yes\r\nchecking whether gcc accepts -g... yes\r\nchecking for gcc option to accept ISO C89... none needed\r\nchecking whether gcc understands -c and -o together... yes\r\nchecking dependency style of gcc... gcc3\r\nchecking for ar... ar\r\nchecking the archiver (ar) interface... ar\r\nchecking whether make sets $(MAKE)... (cached) yes\r\nchecking for pkg-config... /usr/bin/pkg-config\r\nchecking pkg-config is at least version 0.9.0... yes\r\nconfigure: Nothing about YAJL was informed during the configure phase. Trying to detect it on the platform...\r\nconfigure: using YAJL v2.1.0\r\nconfigure: Nothing about GeoIP was informed during the configure phase. Trying to detect it on the platform...\r\nconfigure: using GeoIP v1.6.12\r\nconfigure: Nothing about MaxMind was informed during the configure phase. Trying to detect it on the platform...\r\nconfigure: MaxMind library was not found\r\nconfigure: Nothing about LMDB was informed during the configure phase. Trying to detect it on the platform...\r\nconfigure: LMDB is disabled by default.\r\n*** LOOKING AT PATH: /usr/lib\r\n*** LOOKING AT PATH: /usr/local/lib\r\n*** LOOKING AT PATH: /usr/local/fuzzy\r\n*** LOOKING AT PATH: /usr/local/libfuzzy\r\n*** LOOKING AT PATH: /usr/local\r\n*** LOOKING AT PATH: /opt\r\n*** LOOKING AT PATH: /usr\r\n*** LOOKING AT PATH: /usr/lib64\r\n*** LOOKING AT PATH: /opt/local\r\nconfigure: SSDEEP library was not found\r\n*** LOOKING AT PATH: /usr/lib\r\n*** LOOKING AT PATH: /usr/local/lib\r\n*** LOOKING AT PATH: /usr/local/lib64\r\n*** LOOKING AT PATH: /usr/local/lua\r\n*** LOOKING AT PATH: /usr/local/liblua\r\n*** LOOKING AT PATH: /usr/local\r\n*** LOOKING AT PATH: /opt\r\n*** LOOKING AT PATH: /usr\r\n*** LOOKING AT PATH: /usr/lib64\r\n*** LOOKING AT PATH: /opt/local\r\n*** LOOKING AT PATH: /usr/lib/lua5.3/liblua\r\n*** LOOKING AT PATH: /usr/lib/lua5.2/liblua\r\nconfigure: LUA library was not found\r\nchecking for libcurl config script... /usr/bin/curl-config\r\nconfigure: curl VERSION: 7.74.0 \r\nconfigure: curl LDADD: \r\nchecking if libcurl is at least v... yes, 7.74.0 \r\nchecking if libcurl is linked with gnutls... no\r\nconfigure: using curl v7.74.0 \r\nchecking for libxml2 config script... /usr/bin/xml2-config\r\nconfigure: xml VERSION: 2.9.10\r\nconfigure: xml CFLAGS: -I/usr/include/libxml2 -DWITH_LIBXML2\r\nconfigure: xml LDADD: -lxml2\r\nchecking if libxml2 is at least v2.6.29... yes, 2.9.10\r\nconfigure: using libxml2 v2.9.10\r\nchecking for libpcre config script... /usr/bin/pcre-config\r\nconfigure: pcre VERSION: 8.39\r\nconfigure: pcre LDADD: -lpcre\r\nconfigure: pcre PCRE_LD_PATH: /-lpcre\r\nchecking for PCRE JIT... yes\r\nconfigure: using pcre v8.39\r\nchecking how to run the C preprocessor... gcc -E\r\nchecking for grep that handles long lines and -e... /usr/bin/grep\r\nchecking for egrep... /usr/bin/grep -E\r\nchecking for ANSI C header files... yes\r\nchecking for sys/types.h... yes\r\nchecking for sys/stat.h... yes\r\nchecking for stdlib.h... yes\r\nchecking for string.h... yes\r\nchecking for memory.h... yes\r\nchecking for strings.h... yes\r\nchecking for inttypes.h... yes\r\nchecking for stdint.h... yes\r\nchecking for unistd.h... yes\r\nchecking string usability... no\r\nchecking string presence... no\r\nchecking for string... no\r\nchecking iostream usability... no\r\nchecking iostream presence... no\r\nchecking for iostream... no\r\nchecking sys/utsname.h usability... yes\r\nchecking sys/utsname.h presence... 
yes\r\nchecking for sys/utsname.h... yes\r\nchecking build system type... x86_64-pc-linux-gnu\r\nchecking host system type... x86_64-pc-linux-gnu\r\nchecking how to print strings... printf\r\nchecking for a sed that does not truncate output... /usr/bin/sed\r\nchecking for fgrep... /usr/bin/grep -F\r\nchecking for ld used by gcc... /usr/bin/ld\r\nchecking if the linker (/usr/bin/ld) is GNU ld... yes\r\nchecking for BSD- or MS-compatible name lister (nm)... /usr/bin/nm -B\r\nchecking the name lister (/usr/bin/nm -B) interface... BSD nm\r\nchecking whether ln -s works... yes\r\nchecking the maximum length of command line arguments... 1572864\r\nchecking how to convert x86_64-pc-linux-gnu file names to x86_64-pc-linux-gnu format... func_convert_file_noop\r\nchecking how to convert x86_64-pc-linux-gnu file names to toolchain format... func_convert_file_noop\r\nchecking for /usr/bin/ld option to reload object files... -r\r\nchecking for objdump... objdump\r\nchecking how to recognize dependent libraries... pass_all\r\nchecking for dlltool... no\r\nchecking how to associate runtime and link libraries... printf %s\\n\r\nchecking for archiver @FILE support... @\r\nchecking for strip... strip\r\nchecking for ranlib... ranlib\r\nchecking command to parse /usr/bin/nm -B output from gcc object... ok\r\nchecking for sysroot... no\r\nchecking for a working dd... /usr/bin/dd\r\nchecking how to truncate binary pipes... /usr/bin/dd bs=4096 count=1\r\nchecking for mt... mt\r\nchecking if mt is a manifest tool... no\r\nchecking for dlfcn.h... yes\r\nchecking for objdir... .libs\r\nchecking if gcc supports -fno-rtti -fno-exceptions... no\r\nchecking for gcc option to produce PIC... -fPIC -DPIC\r\nchecking if gcc PIC flag -fPIC -DPIC works... yes\r\nchecking if gcc static flag -static works... yes\r\nchecking if gcc supports -c -o file.o... yes\r\nchecking if gcc supports -c -o file.o... (cached) yes\r\nchecking whether the gcc linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes\r\nchecking whether -lc should be explicitly linked in... no\r\nchecking dynamic linker characteristics... GNU/Linux ld.so\r\nchecking how to hardcode library paths into programs... immediate\r\nchecking for shl_load... no\r\nchecking for shl_load in -ldld... no\r\nchecking for dlopen... no\r\nchecking for dlopen in -ldl... yes\r\nchecking whether a program can dlopen itself... yes\r\nchecking whether a statically linked program can dlopen itself... no\r\nchecking whether stripping libraries is possible... yes\r\nchecking if libtool supports shared libraries... yes\r\nchecking whether to build shared libraries... yes\r\nchecking whether to build static libraries... yes\r\nchecking how to run the C++ preprocessor... g++ -E\r\nchecking for ld used by g++... /usr/bin/ld -m elf_x86_64\r\nchecking if the linker (/usr/bin/ld -m elf_x86_64) is GNU ld... yes\r\nchecking whether the g++ linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes\r\nchecking for g++ option to produce PIC... -fPIC -DPIC\r\nchecking if g++ PIC flag -fPIC -DPIC works... yes\r\nchecking if g++ static flag -static works... yes\r\nchecking if g++ supports -c -o file.o... yes\r\nchecking if g++ supports -c -o file.o... (cached) yes\r\nchecking whether the g++ linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes\r\nchecking dynamic linker characteristics... (cached) GNU/Linux ld.so\r\nchecking how to hardcode library paths into programs... immediate\r\nChecking platform... Identified as Linux\r\nchecking for doxygen... 
no\r\nconfigure: WARNING: doxygen not found - will not generate any doxygen documentation\r\nchecking for perl... /usr/bin/perl\r\nchecking for valgrind... no\r\nchecking that generated files are newer than configure... done\r\nconfigure: creating ./config.status\r\nconfig.status: creating modsecurity.pc\r\nconfig.status: creating Makefile\r\nconfig.status: creating doc/Makefile\r\nconfig.status: creating src/Makefile\r\nconfig.status: creating others/Makefile\r\nconfig.status: creating tools/Makefile\r\nconfig.status: creating tools/rules-check/Makefile\r\nconfig.status: creating test/Makefile\r\nconfig.status: creating test/benchmark/Makefile\r\nconfig.status: creating examples/Makefile\r\nconfig.status: creating examples/simple_example_using_c/Makefile\r\nconfig.status: creating examples/multiprocess_c/Makefile\r\nconfig.status: creating examples/reading_logs_with_offset/Makefile\r\nconfig.status: creating examples/reading_logs_via_rule_message/Makefile\r\nconfig.status: creating examples/using_bodies_in_chunks/Makefile\r\nconfig.status: creating src/config.h\r\nconfig.status: src/config.h is unchanged\r\nconfig.status: executing depfiles commands\r\nconfig.status: executing libtool commands\r\n \r\n \r\nModSecurity - for Linux\r\n \r\n Mandatory dependencies\r\n + libInjection ....v3.9.2-46-gbfba51f\r\n + SecLang tests ....a3d4405\r\n \r\n Optional dependencies\r\n + GeoIP/MaxMind ....found \r\n * (GeoIP) v1.6.12\r\n -lGeoIP , -I/usr/include/ \r\n + LibCURL ....found v7.74.0 \r\n -lcurl, -DWITH_CURL_SSLVERSION_TLSv1_2 -DWITH_CURL\r\n + YAJL ....found v2.1.0\r\n -lyajl , -DWITH_YAJL -I/usr/include/yajl \r\n + LMDB ....disabled\r\n + LibXML2 ....found v2.9.10\r\n -lxml2, -I/usr/include/libxml2 -DWITH_LIBXML2\r\n + SSDEEP ....not found\r\n + LUA ....not found\r\n \r\n Other Options\r\n + Test Utilities ....enabled\r\n + SecDebugLog ....enabled\r\n + afl fuzzer ....disabled\r\n + library examples ....enabled\r\n + Building parser ....disabled\r\n + Treating pm operations as critical section ....disabled\r\n \r\n~/ModSecurity$\r\n```\r\n```\r\nmake\r\n```\r\n```\r\nmv -f variables/.deps/libmodsecurity_la-xml.Tpo variables/.deps/libmodsecurity_la-xml.Plo\r\n/bin/bash ../libtool --tag=CXX --mode=link g++ -g -O2 -L/usr/lib/x86_64-linux-gnu -L/usr/lib/x86_64-linux-gnu -L/usr/lib/x86_64-linux-gnu -version-info 3:5:0 -o libmodsecurity.la -rpath /usr/local/modsecurity/lib parser/libmodsecurity_la-seclang-parser.lo parser/libmodsecurity_la-seclang-scanner.lo parser/libmodsecurity_la-driver.lo libmodsecurity_la-transaction.lo libmodsecurity_la-anchored_set_variable.lo libmodsecurity_la-anchored_variable.lo audit_log/libmodsecurity_la-audit_log.lo audit_log/writer/libmodsecurity_la-writer.lo audit_log/writer/libmodsecurity_la-https.lo audit_log/writer/libmodsecurity_la-serial.lo audit_log/writer/libmodsecurity_la-parallel.lo libmodsecurity_la-modsecurity.lo libmodsecurity_la-rules_set.lo libmodsecurity_la-rules_set_phases.lo libmodsecurity_la-rules_set_properties.lo debug_log/libmodsecurity_la-debug_log.lo debug_log/libmodsecurity_la-debug_log_writer.lo libmodsecurity_la-run_time_string.lo libmodsecurity_la-rule.lo libmodsecurity_la-rule_unconditional.lo libmodsecurity_la-rule_with_actions.lo libmodsecurity_la-rule_with_operator.lo libmodsecurity_la-rule_message.lo libmodsecurity_la-rule_script.lo libmodsecurity_la-unique_id.lo libmodsecurity_la-rules_exceptions.lo request_body_processor/libmodsecurity_la-multipart.lo request_body_processor/libmodsecurity_la-xml.lo 
request_body_processor/libmodsecurity_la-json.lo actions/libmodsecurity_la-accuracy.lo actions/libmodsecurity_la-action.lo actions/libmodsecurity_la-audit_log.lo actions/libmodsecurity_la-block.lo actions/libmodsecurity_la-capture.lo actions/libmodsecurity_la-chain.lo actions/ctl/libmodsecurity_la-audit_log_parts.lo actions/ctl/libmodsecurity_la-rule_engine.lo actions/ctl/libmodsecurity_la-request_body_processor_json.lo actions/ctl/libmodsecurity_la-request_body_processor_xml.lo actions/ctl/libmodsecurity_la-request_body_processor_urlencoded.lo actions/ctl/libmodsecurity_la-rule_remove_target_by_tag.lo actions/ctl/libmodsecurity_la-rule_remove_target_by_id.lo actions/ctl/libmodsecurity_la-rule_remove_by_id.lo actions/ctl/libmodsecurity_la-rule_remove_by_tag.lo actions/ctl/libmodsecurity_la-request_body_access.lo actions/disruptive/libmodsecurity_la-allow.lo actions/disruptive/libmodsecurity_la-deny.lo actions/disruptive/libmodsecurity_la-drop.lo actions/disruptive/libmodsecurity_la-redirect.lo actions/disruptive/libmodsecurity_la-pass.lo actions/libmodsecurity_la-exec.lo actions/libmodsecurity_la-init_col.lo actions/libmodsecurity_la-log.lo actions/libmodsecurity_la-log_data.lo actions/libmodsecurity_la-maturity.lo actions/libmodsecurity_la-msg.lo actions/libmodsecurity_la-multi_match.lo actions/libmodsecurity_la-no_audit_log.lo actions/libmodsecurity_la-no_log.lo actions/libmodsecurity_la-phase.lo actions/libmodsecurity_la-rev.lo actions/libmodsecurity_la-rule_id.lo actions/libmodsecurity_la-severity.lo actions/libmodsecurity_la-set_env.lo actions/libmodsecurity_la-set_rsc.lo actions/libmodsecurity_la-set_sid.lo actions/libmodsecurity_la-set_uid.lo actions/libmodsecurity_la-set_var.lo actions/data/libmodsecurity_la-status.lo actions/libmodsecurity_la-skip.lo actions/libmodsecurity_la-skip_after.lo actions/libmodsecurity_la-tag.lo actions/transformations/libmodsecurity_la-base64_decode.lo actions/transformations/libmodsecurity_la-base64_encode.lo actions/transformations/libmodsecurity_la-base64_decode_ext.lo actions/transformations/libmodsecurity_la-cmd_line.lo actions/transformations/libmodsecurity_la-compress_whitespace.lo actions/transformations/libmodsecurity_la-css_decode.lo actions/transformations/libmodsecurity_la-escape_seq_decode.lo actions/transformations/libmodsecurity_la-hex_decode.lo actions/transformations/libmodsecurity_la-hex_encode.lo actions/transformations/libmodsecurity_la-html_entity_decode.lo actions/transformations/libmodsecurity_la-js_decode.lo actions/transformations/libmodsecurity_la-length.lo actions/transformations/libmodsecurity_la-lower_case.lo actions/transformations/libmodsecurity_la-md5.lo actions/transformations/libmodsecurity_la-none.lo actions/transformations/libmodsecurity_la-normalise_path.lo actions/transformations/libmodsecurity_la-normalise_path_win.lo actions/transformations/libmodsecurity_la-parity_even_7bit.lo actions/transformations/libmodsecurity_la-parity_odd_7bit.lo actions/transformations/libmodsecurity_la-parity_zero_7bit.lo actions/transformations/libmodsecurity_la-remove_comments.lo actions/transformations/libmodsecurity_la-remove_comments_char.lo actions/transformations/libmodsecurity_la-remove_nulls.lo actions/transformations/libmodsecurity_la-remove_whitespace.lo actions/transformations/libmodsecurity_la-replace_comments.lo actions/transformations/libmodsecurity_la-replace_nulls.lo actions/transformations/libmodsecurity_la-sha1.lo actions/transformations/libmodsecurity_la-sql_hex_decode.lo 
actions/transformations/libmodsecurity_la-transformation.lo actions/transformations/libmodsecurity_la-trim.lo actions/transformations/libmodsecurity_la-trim_left.lo actions/transformations/libmodsecurity_la-trim_right.lo actions/transformations/libmodsecurity_la-upper_case.lo actions/transformations/libmodsecurity_la-url_decode.lo actions/transformations/libmodsecurity_la-url_decode_uni.lo actions/transformations/libmodsecurity_la-url_encode.lo actions/transformations/libmodsecurity_la-utf8_to_unicode.lo actions/libmodsecurity_la-ver.lo actions/libmodsecurity_la-xmlns.lo engine/libmodsecurity_la-lua.lo collection/libmodsecurity_la-collections.lo collection/backend/libmodsecurity_la-in_memory-per_process.lo collection/backend/libmodsecurity_la-lmdb.lo operators/libmodsecurity_la-begins_with.lo operators/libmodsecurity_la-contains.lo operators/libmodsecurity_la-contains_word.lo operators/libmodsecurity_la-detect_sqli.lo operators/libmodsecurity_la-detect_xss.lo operators/libmodsecurity_la-ends_with.lo operators/libmodsecurity_la-eq.lo operators/libmodsecurity_la-fuzzy_hash.lo operators/libmodsecurity_la-ge.lo operators/libmodsecurity_la-geo_lookup.lo operators/libmodsecurity_la-gsblookup.lo operators/libmodsecurity_la-gt.lo operators/libmodsecurity_la-inspect_file.lo operators/libmodsecurity_la-ip_match.lo operators/libmodsecurity_la-ip_match_f.lo operators/libmodsecurity_la-ip_match_from_file.lo operators/libmodsecurity_la-le.lo operators/libmodsecurity_la-lt.lo operators/libmodsecurity_la-no_match.lo operators/libmodsecurity_la-operator.lo operators/libmodsecurity_la-pm.lo operators/libmodsecurity_la-pm_f.lo operators/libmodsecurity_la-pm_from_file.lo operators/libmodsecurity_la-rbl.lo operators/libmodsecurity_la-rsub.lo operators/libmodsecurity_la-rx.lo operators/libmodsecurity_la-rx_global.lo operators/libmodsecurity_la-str_eq.lo operators/libmodsecurity_la-str_match.lo operators/libmodsecurity_la-validate_byte_range.lo operators/libmodsecurity_la-validate_dtd.lo operators/libmodsecurity_la-validate_hash.lo operators/libmodsecurity_la-validate_schema.lo operators/libmodsecurity_la-validate_url_encoding.lo operators/libmodsecurity_la-validate_utf8_encoding.lo operators/libmodsecurity_la-verify_cc.lo operators/libmodsecurity_la-verify_cpf.lo operators/libmodsecurity_la-verify_ssn.lo operators/libmodsecurity_la-verify_svnr.lo operators/libmodsecurity_la-within.lo operators/libmodsecurity_la-unconditional_match.lo utils/libmodsecurity_la-acmp.lo utils/libmodsecurity_la-base64.lo utils/libmodsecurity_la-decode.lo utils/libmodsecurity_la-geo_lookup.lo utils/libmodsecurity_la-https_client.lo utils/libmodsecurity_la-ip_tree.lo utils/libmodsecurity_la-md5.lo utils/libmodsecurity_la-msc_tree.lo utils/libmodsecurity_la-random.lo utils/libmodsecurity_la-regex.lo utils/libmodsecurity_la-sha1.lo utils/libmodsecurity_la-string.lo utils/libmodsecurity_la-system.lo utils/libmodsecurity_la-shared_files.lo variables/libmodsecurity_la-duration.lo variables/libmodsecurity_la-env.lo variables/libmodsecurity_la-highest_severity.lo variables/libmodsecurity_la-modsec_build.lo variables/libmodsecurity_la-remote_user.lo variables/libmodsecurity_la-rule.lo variables/libmodsecurity_la-time.lo variables/libmodsecurity_la-time_day.lo variables/libmodsecurity_la-time_epoch.lo variables/libmodsecurity_la-time_hour.lo variables/libmodsecurity_la-time_min.lo variables/libmodsecurity_la-time_mon.lo variables/libmodsecurity_la-time_sec.lo variables/libmodsecurity_la-time_wday.lo variables/libmodsecurity_la-time_year.lo 
variables/libmodsecurity_la-tx.lo variables/libmodsecurity_la-variable.lo variables/libmodsecurity_la-xml.lo -lcurl -lGeoIP -lrt -lxml2 -llmdb ../others/libinjection.la ../others/libmbedtls.la -lpcre -lpcre -lyajl \r\nlibtool: link: g++ -fPIC -DPIC -shared -nostdlib /usr/lib/gcc/x86_64-linux-gnu/10/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/10/crtbeginS.o parser/.libs/libmodsecurity_la-seclang-parser.o parser/.libs/libmodsecurity_la-seclang-scanner.o parser/.libs/libmodsecurity_la-driver.o .libs/libmodsecurity_la-transaction.o .libs/libmodsecurity_la-anchored_set_variable.o .libs/libmodsecurity_la-anchored_variable.o audit_log/.libs/libmodsecurity_la-audit_log.o audit_log/writer/.libs/libmodsecurity_la-writer.o audit_log/writer/.libs/libmodsecurity_la-https.o audit_log/writer/.libs/libmodsecurity_la-serial.o audit_log/writer/.libs/libmodsecurity_la-parallel.o .libs/libmodsecurity_la-modsecurity.o .libs/libmodsecurity_la-rules_set.o .libs/libmodsecurity_la-rules_set_phases.o .libs/libmodsecurity_la-rules_set_properties.o debug_log/.libs/libmodsecurity_la-debug_log.o debug_log/.libs/libmodsecurity_la-debug_log_writer.o .libs/libmodsecurity_la-run_time_string.o .libs/libmodsecurity_la-rule.o .libs/libmodsecurity_la-rule_unconditional.o .libs/libmodsecurity_la-rule_with_actions.o .libs/libmodsecurity_la-rule_with_operator.o .libs/libmodsecurity_la-rule_message.o .libs/libmodsecurity_la-rule_script.o .libs/libmodsecurity_la-unique_id.o .libs/libmodsecurity_la-rules_exceptions.o request_body_processor/.libs/libmodsecurity_la-multipart.o request_body_processor/.libs/libmodsecurity_la-xml.o request_body_processor/.libs/libmodsecurity_la-json.o actions/.libs/libmodsecurity_la-accuracy.o actions/.libs/libmodsecurity_la-action.o actions/.libs/libmodsecurity_la-audit_log.o actions/.libs/libmodsecurity_la-block.o actions/.libs/libmodsecurity_la-capture.o actions/.libs/libmodsecurity_la-chain.o actions/ctl/.libs/libmodsecurity_la-audit_log_parts.o actions/ctl/.libs/libmodsecurity_la-rule_engine.o actions/ctl/.libs/libmodsecurity_la-request_body_processor_json.o actions/ctl/.libs/libmodsecurity_la-request_body_processor_xml.o actions/ctl/.libs/libmodsecurity_la-request_body_processor_urlencoded.o actions/ctl/.libs/libmodsecurity_la-rule_remove_target_by_tag.o actions/ctl/.libs/libmodsecurity_la-rule_remove_target_by_id.o actions/ctl/.libs/libmodsecurity_la-rule_remove_by_id.o actions/ctl/.libs/libmodsecurity_la-rule_remove_by_tag.o actions/ctl/.libs/libmodsecurity_la-request_body_access.o actions/disruptive/.libs/libmodsecurity_la-allow.o actions/disruptive/.libs/libmodsecurity_la-deny.o actions/disruptive/.libs/libmodsecurity_la-drop.o actions/disruptive/.libs/libmodsecurity_la-redirect.o actions/disruptive/.libs/libmodsecurity_la-pass.o actions/.libs/libmodsecurity_la-exec.o actions/.libs/libmodsecurity_la-init_col.o actions/.libs/libmodsecurity_la-log.o actions/.libs/libmodsecurity_la-log_data.o actions/.libs/libmodsecurity_la-maturity.o actions/.libs/libmodsecurity_la-msg.o actions/.libs/libmodsecurity_la-multi_match.o actions/.libs/libmodsecurity_la-no_audit_log.o actions/.libs/libmodsecurity_la-no_log.o actions/.libs/libmodsecurity_la-phase.o actions/.libs/libmodsecurity_la-rev.o actions/.libs/libmodsecurity_la-rule_id.o actions/.libs/libmodsecurity_la-severity.o actions/.libs/libmodsecurity_la-set_env.o actions/.libs/libmodsecurity_la-set_rsc.o actions/.libs/libmodsecurity_la-set_sid.o actions/.libs/libmodsecurity_la-set_uid.o actions/.libs/libmodsecurity_la-set_var.o 
actions/data/.libs/libmodsecurity_la-status.o actions/.libs/libmodsecurity_la-skip.o actions/.libs/libmodsecurity_la-skip_after.o actions/.libs/libmodsecurity_la-tag.o actions/transformations/.libs/libmodsecurity_la-base64_decode.o actions/transformations/.libs/libmodsecurity_la-base64_encode.o actions/transformations/.libs/libmodsecurity_la-base64_decode_ext.o actions/transformations/.libs/libmodsecurity_la-cmd_line.o actions/transformations/.libs/libmodsecurity_la-compress_whitespace.o actions/transformations/.libs/libmodsecurity_la-css_decode.o actions/transformations/.libs/libmodsecurity_la-escape_seq_decode.o actions/transformations/.libs/libmodsecurity_la-hex_decode.o actions/transformations/.libs/libmodsecurity_la-hex_encode.o actions/transformations/.libs/libmodsecurity_la-html_entity_decode.o actions/transformations/.libs/libmodsecurity_la-js_decode.o actions/transformations/.libs/libmodsecurity_la-length.o actions/transformations/.libs/libmodsecurity_la-lower_case.o actions/transformations/.libs/libmodsecurity_la-md5.o actions/transformations/.libs/libmodsecurity_la-none.o actions/transformations/.libs/libmodsecurity_la-normalise_path.o actions/transformations/.libs/libmodsecurity_la-normalise_path_win.o actions/transformations/.libs/libmodsecurity_la-parity_even_7bit.o actions/transformations/.libs/libmodsecurity_la-parity_odd_7bit.o actions/transformations/.libs/libmodsecurity_la-parity_zero_7bit.o actions/transformations/.libs/libmodsecurity_la-remove_comments.o actions/transformations/.libs/libmodsecurity_la-remove_comments_char.o actions/transformations/.libs/libmodsecurity_la-remove_nulls.o actions/transformations/.libs/libmodsecurity_la-remove_whitespace.o actions/transformations/.libs/libmodsecurity_la-replace_comments.o actions/transformations/.libs/libmodsecurity_la-replace_nulls.o actions/transformations/.libs/libmodsecurity_la-sha1.o actions/transformations/.libs/libmodsecurity_la-sql_hex_decode.o actions/transformations/.libs/libmodsecurity_la-transformation.o actions/transformations/.libs/libmodsecurity_la-trim.o actions/transformations/.libs/libmodsecurity_la-trim_left.o actions/transformations/.libs/libmodsecurity_la-trim_right.o actions/transformations/.libs/libmodsecurity_la-upper_case.o actions/transformations/.libs/libmodsecurity_la-url_decode.o actions/transformations/.libs/libmodsecurity_la-url_decode_uni.o actions/transformations/.libs/libmodsecurity_la-url_encode.o actions/transformations/.libs/libmodsecurity_la-utf8_to_unicode.o actions/.libs/libmodsecurity_la-ver.o actions/.libs/libmodsecurity_la-xmlns.o engine/.libs/libmodsecurity_la-lua.o collection/.libs/libmodsecurity_la-collections.o collection/backend/.libs/libmodsecurity_la-in_memory-per_process.o collection/backend/.libs/libmodsecurity_la-lmdb.o operators/.libs/libmodsecurity_la-begins_with.o operators/.libs/libmodsecurity_la-contains.o operators/.libs/libmodsecurity_la-contains_word.o operators/.libs/libmodsecurity_la-detect_sqli.o operators/.libs/libmodsecurity_la-detect_xss.o operators/.libs/libmodsecurity_la-ends_with.o operators/.libs/libmodsecurity_la-eq.o operators/.libs/libmodsecurity_la-fuzzy_hash.o operators/.libs/libmodsecurity_la-ge.o operators/.libs/libmodsecurity_la-geo_lookup.o operators/.libs/libmodsecurity_la-gsblookup.o operators/.libs/libmodsecurity_la-gt.o operators/.libs/libmodsecurity_la-inspect_file.o operators/.libs/libmodsecurity_la-ip_match.o operators/.libs/libmodsecurity_la-ip_match_f.o operators/.libs/libmodsecurity_la-ip_match_from_file.o 
operators/.libs/libmodsecurity_la-le.o operators/.libs/libmodsecurity_la-lt.o operators/.libs/libmodsecurity_la-no_match.o operators/.libs/libmodsecurity_la-operator.o operators/.libs/libmodsecurity_la-pm.o operators/.libs/libmodsecurity_la-pm_f.o operators/.libs/libmodsecurity_la-pm_from_file.o operators/.libs/libmodsecurity_la-rbl.o operators/.libs/libmodsecurity_la-rsub.o operators/.libs/libmodsecurity_la-rx.o operators/.libs/libmodsecurity_la-rx_global.o operators/.libs/libmodsecurity_la-str_eq.o operators/.libs/libmodsecurity_la-str_match.o operators/.libs/libmodsecurity_la-validate_byte_range.o operators/.libs/libmodsecurity_la-validate_dtd.o operators/.libs/libmodsecurity_la-validate_hash.o operators/.libs/libmodsecurity_la-validate_schema.o operators/.libs/libmodsecurity_la-validate_url_encoding.o operators/.libs/libmodsecurity_la-validate_utf8_encoding.o operators/.libs/libmodsecurity_la-verify_cc.o operators/.libs/libmodsecurity_la-verify_cpf.o operators/.libs/libmodsecurity_la-verify_ssn.o operators/.libs/libmodsecurity_la-verify_svnr.o operators/.libs/libmodsecurity_la-within.o operators/.libs/libmodsecurity_la-unconditional_match.o utils/.libs/libmodsecurity_la-acmp.o utils/.libs/libmodsecurity_la-base64.o utils/.libs/libmodsecurity_la-decode.o utils/.libs/libmodsecurity_la-geo_lookup.o utils/.libs/libmodsecurity_la-https_client.o utils/.libs/libmodsecurity_la-ip_tree.o utils/.libs/libmodsecurity_la-md5.o utils/.libs/libmodsecurity_la-msc_tree.o utils/.libs/libmodsecurity_la-random.o utils/.libs/libmodsecurity_la-regex.o utils/.libs/libmodsecurity_la-sha1.o utils/.libs/libmodsecurity_la-string.o utils/.libs/libmodsecurity_la-system.o utils/.libs/libmodsecurity_la-shared_files.o variables/.libs/libmodsecurity_la-duration.o variables/.libs/libmodsecurity_la-env.o variables/.libs/libmodsecurity_la-highest_severity.o variables/.libs/libmodsecurity_la-modsec_build.o variables/.libs/libmodsecurity_la-remote_user.o variables/.libs/libmodsecurity_la-rule.o variables/.libs/libmodsecurity_la-time.o variables/.libs/libmodsecurity_la-time_day.o variables/.libs/libmodsecurity_la-time_epoch.o variables/.libs/libmodsecurity_la-time_hour.o variables/.libs/libmodsecurity_la-time_min.o variables/.libs/libmodsecurity_la-time_mon.o variables/.libs/libmodsecurity_la-time_sec.o variables/.libs/libmodsecurity_la-time_wday.o variables/.libs/libmodsecurity_la-time_year.o variables/.libs/libmodsecurity_la-tx.o variables/.libs/libmodsecurity_la-variable.o variables/.libs/libmodsecurity_la-xml.o -Wl,--whole-archive ../others/.libs/libinjection.a ../others/.libs/libmbedtls.a -Wl,--no-whole-archive -L/usr/lib/x86_64-linux-gnu -lcurl -lGeoIP -lrt -lxml2 -llmdb -lpcre -lyajl -L/usr/lib/gcc/x86_64-linux-gnu/10 -L/usr/lib/gcc/x86_64-linux-gnu/10/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/10/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/../lib -L/usr/lib/gcc/x86_64-linux-gnu/10/../../.. 
-lstdc++ -lm -lc -lgcc_s /usr/lib/gcc/x86_64-linux-gnu/10/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/10/../../../x86_64-linux-gnu/crtn.o -g -O2 -Wl,-soname -Wl,libmodsecurity.so.3 -o .libs/libmodsecurity.so.3.0.5\r\nlibtool: link: (cd \".libs\" && rm -f \"libmodsecurity.so.3\" && ln -s \"libmodsecurity.so.3.0.5\" \"libmodsecurity.so.3\")\r\nlibtool: link: (cd \".libs\" && rm -f \"libmodsecurity.so\" && ln -s \"libmodsecurity.so.3.0.5\" \"libmodsecurity.so\")\r\nlibtool: link: (cd .libs/libmodsecurity.lax/libinjection.a && ar x \"/home/user/ModSecurity/src/../others/.libs/libinjection.a\")\r\nlibtool: link: (cd .libs/libmodsecurity.lax/libmbedtls.a && ar x \"/home/user/ModSecurity/src/../others/.libs/libmbedtls.a\")\r\ncopying selected object files to avoid basename conflicts...\r\nlibtool: link: ln actions/libmodsecurity_la-audit_log.o .libs/libmodsecurity.lax/lt1-libmodsecurity_la-audit_log.o || cp actions/libmodsecurity_la-audit_log.o .libs/libmodsecurity.lax/lt1-libmodsecurity_la-audit_log.o\r\nlibtool: link: ln utils/libmodsecurity_la-geo_lookup.o .libs/libmodsecurity.lax/lt2-libmodsecurity_la-geo_lookup.o || cp utils/libmodsecurity_la-geo_lookup.o .libs/libmodsecurity.lax/lt2-libmodsecurity_la-geo_lookup.o\r\nlibtool: link: ln utils/libmodsecurity_la-md5.o .libs/libmodsecurity.lax/lt3-libmodsecurity_la-md5.o || cp utils/libmodsecurity_la-md5.o .libs/libmodsecurity.lax/lt3-libmodsecurity_la-md5.o\r\nlibtool: link: ln utils/libmodsecurity_la-sha1.o .libs/libmodsecurity.lax/lt4-libmodsecurity_la-sha1.o || cp utils/libmodsecurity_la-sha1.o .libs/libmodsecurity.lax/lt4-libmodsecurity_la-sha1.o\r\nlibtool: link: ln variables/libmodsecurity_la-rule.o .libs/libmodsecurity.lax/lt5-libmodsecurity_la-rule.o || cp variables/libmodsecurity_la-rule.o .libs/libmodsecurity.lax/lt5-libmodsecurity_la-rule.o\r\nlibtool: link: ln variables/libmodsecurity_la-xml.o .libs/libmodsecurity.lax/lt6-libmodsecurity_la-xml.o || cp variables/libmodsecurity_la-xml.o .libs/libmodsecurity.lax/lt6-libmodsecurity_la-xml.o\r\nlibtool: link: ar cr .libs/libmodsecurity.a parser/libmodsecurity_la-seclang-parser.o parser/libmodsecurity_la-seclang-scanner.o parser/libmodsecurity_la-driver.o libmodsecurity_la-transaction.o libmodsecurity_la-anchored_set_variable.o libmodsecurity_la-anchored_variable.o audit_log/libmodsecurity_la-audit_log.o audit_log/writer/libmodsecurity_la-writer.o audit_log/writer/libmodsecurity_la-https.o audit_log/writer/libmodsecurity_la-serial.o audit_log/writer/libmodsecurity_la-parallel.o libmodsecurity_la-modsecurity.o libmodsecurity_la-rules_set.o libmodsecurity_la-rules_set_phases.o libmodsecurity_la-rules_set_properties.o debug_log/libmodsecurity_la-debug_log.o debug_log/libmodsecurity_la-debug_log_writer.o libmodsecurity_la-run_time_string.o libmodsecurity_la-rule.o libmodsecurity_la-rule_unconditional.o libmodsecurity_la-rule_with_actions.o libmodsecurity_la-rule_with_operator.o libmodsecurity_la-rule_message.o libmodsecurity_la-rule_script.o libmodsecurity_la-unique_id.o libmodsecurity_la-rules_exceptions.o request_body_processor/libmodsecurity_la-multipart.o request_body_processor/libmodsecurity_la-xml.o request_body_processor/libmodsecurity_la-json.o actions/libmodsecurity_la-accuracy.o actions/libmodsecurity_la-action.o .libs/libmodsecurity.lax/lt1-libmodsecurity_la-audit_log.o actions/libmodsecurity_la-block.o actions/libmodsecurity_la-capture.o actions/libmodsecurity_la-chain.o actions/ctl/libmodsecurity_la-audit_log_parts.o actions/ctl/libmodsecurity_la-rule_engine.o 
actions/ctl/libmodsecurity_la-request_body_processor_json.o actions/ctl/libmodsecurity_la-request_body_processor_xml.o actions/ctl/libmodsecurity_la-request_body_processor_urlencoded.o actions/ctl/libmodsecurity_la-rule_remove_target_by_tag.o actions/ctl/libmodsecurity_la-rule_remove_target_by_id.o actions/ctl/libmodsecurity_la-rule_remove_by_id.o actions/ctl/libmodsecurity_la-rule_remove_by_tag.o actions/ctl/libmodsecurity_la-request_body_access.o actions/disruptive/libmodsecurity_la-allow.o actions/disruptive/libmodsecurity_la-deny.o actions/disruptive/libmodsecurity_la-drop.o actions/disruptive/libmodsecurity_la-redirect.o actions/disruptive/libmodsecurity_la-pass.o actions/libmodsecurity_la-exec.o actions/libmodsecurity_la-init_col.o actions/libmodsecurity_la-log.o actions/libmodsecurity_la-log_data.o actions/libmodsecurity_la-maturity.o actions/libmodsecurity_la-msg.o actions/libmodsecurity_la-multi_match.o actions/libmodsecurity_la-no_audit_log.o actions/libmodsecurity_la-no_log.o actions/libmodsecurity_la-phase.o actions/libmodsecurity_la-rev.o actions/libmodsecurity_la-rule_id.o actions/libmodsecurity_la-severity.o actions/libmodsecurity_la-set_env.o actions/libmodsecurity_la-set_rsc.o actions/libmodsecurity_la-set_sid.o actions/libmodsecurity_la-set_uid.o actions/libmodsecurity_la-set_var.o actions/data/libmodsecurity_la-status.o actions/libmodsecurity_la-skip.o actions/libmodsecurity_la-skip_after.o actions/libmodsecurity_la-tag.o actions/transformations/libmodsecurity_la-base64_decode.o actions/transformations/libmodsecurity_la-base64_encode.o actions/transformations/libmodsecurity_la-base64_decode_ext.o actions/transformations/libmodsecurity_la-cmd_line.o actions/transformations/libmodsecurity_la-compress_whitespace.o actions/transformations/libmodsecurity_la-css_decode.o actions/transformations/libmodsecurity_la-escape_seq_decode.o actions/transformations/libmodsecurity_la-hex_decode.o actions/transformations/libmodsecurity_la-hex_encode.o actions/transformations/libmodsecurity_la-html_entity_decode.o actions/transformations/libmodsecurity_la-js_decode.o actions/transformations/libmodsecurity_la-length.o actions/transformations/libmodsecurity_la-lower_case.o actions/transformations/libmodsecurity_la-md5.o actions/transformations/libmodsecurity_la-none.o actions/transformations/libmodsecurity_la-normalise_path.o actions/transformations/libmodsecurity_la-normalise_path_win.o actions/transformations/libmodsecurity_la-parity_even_7bit.o actions/transformations/libmodsecurity_la-parity_odd_7bit.o actions/transformations/libmodsecurity_la-parity_zero_7bit.o actions/transformations/libmodsecurity_la-remove_comments.o actions/transformations/libmodsecurity_la-remove_comments_char.o actions/transformations/libmodsecurity_la-remove_nulls.o actions/transformations/libmodsecurity_la-remove_whitespace.o actions/transformations/libmodsecurity_la-replace_comments.o actions/transformations/libmodsecurity_la-replace_nulls.o actions/transformations/libmodsecurity_la-sha1.o actions/transformations/libmodsecurity_la-sql_hex_decode.o actions/transformations/libmodsecurity_la-transformation.o actions/transformations/libmodsecurity_la-trim.o actions/transformations/libmodsecurity_la-trim_left.o actions/transformations/libmodsecurity_la-trim_right.o actions/transformations/libmodsecurity_la-upper_case.o actions/transformations/libmodsecurity_la-url_decode.o actions/transformations/libmodsecurity_la-url_decode_uni.o actions/transformations/libmodsecurity_la-url_encode.o 
actions/transformations/libmodsecurity_la-utf8_to_unicode.o actions/libmodsecurity_la-ver.o actions/libmodsecurity_la-xmlns.o engine/libmodsecurity_la-lua.o collection/libmodsecurity_la-collections.o collection/backend/libmodsecurity_la-in_memory-per_process.o collection/backend/libmodsecurity_la-lmdb.o operators/libmodsecurity_la-begins_with.o operators/libmodsecurity_la-contains.o operators/libmodsecurity_la-contains_word.o operators/libmodsecurity_la-detect_sqli.o operators/libmodsecurity_la-detect_xss.o operators/libmodsecurity_la-ends_with.o operators/libmodsecurity_la-eq.o operators/libmodsecurity_la-fuzzy_hash.o operators/libmodsecurity_la-ge.o operators/libmodsecurity_la-geo_lookup.o operators/libmodsecurity_la-gsblookup.o operators/libmodsecurity_la-gt.o operators/libmodsecurity_la-inspect_file.o operators/libmodsecurity_la-ip_match.o operators/libmodsecurity_la-ip_match_f.o operators/libmodsecurity_la-ip_match_from_file.o operators/libmodsecurity_la-le.o operators/libmodsecurity_la-lt.o operators/libmodsecurity_la-no_match.o operators/libmodsecurity_la-operator.o operators/libmodsecurity_la-pm.o operators/libmodsecurity_la-pm_f.o operators/libmodsecurity_la-pm_from_file.o operators/libmodsecurity_la-rbl.o operators/libmodsecurity_la-rsub.o operators/libmodsecurity_la-rx.o operators/libmodsecurity_la-rx_global.o operators/libmodsecurity_la-str_eq.o operators/libmodsecurity_la-str_match.o operators/libmodsecurity_la-validate_byte_range.o operators/libmodsecurity_la-validate_dtd.o operators/libmodsecurity_la-validate_hash.o operators/libmodsecurity_la-validate_schema.o operators/libmodsecurity_la-validate_url_encoding.o operators/libmodsecurity_la-validate_utf8_encoding.o operators/libmodsecurity_la-verify_cc.o operators/libmodsecurity_la-verify_cpf.o operators/libmodsecurity_la-verify_ssn.o operators/libmodsecurity_la-verify_svnr.o operators/libmodsecurity_la-within.o operators/libmodsecurity_la-unconditional_match.o utils/libmodsecurity_la-acmp.o utils/libmodsecurity_la-base64.o utils/libmodsecurity_la-decode.o .libs/libmodsecurity.lax/lt2-libmodsecurity_la-geo_lookup.o utils/libmodsecurity_la-https_client.o utils/libmodsecurity_la-ip_tree.o .libs/libmodsecurity.lax/lt3-libmodsecurity_la-md5.o utils/libmodsecurity_la-msc_tree.o utils/libmodsecurity_la-random.o utils/libmodsecurity_la-regex.o .libs/libmodsecurity.lax/lt4-libmodsecurity_la-sha1.o utils/libmodsecurity_la-string.o utils/libmodsecurity_la-system.o utils/libmodsecurity_la-shared_files.o variables/libmodsecurity_la-duration.o variables/libmodsecurity_la-env.o variables/libmodsecurity_la-highest_severity.o variables/libmodsecurity_la-modsec_build.o variables/libmodsecurity_la-remote_user.o .libs/libmodsecurity.lax/lt5-libmodsecurity_la-rule.o variables/libmodsecurity_la-time.o variables/libmodsecurity_la-time_day.o variables/libmodsecurity_la-time_epoch.o variables/libmodsecurity_la-time_hour.o variables/libmodsecurity_la-time_min.o variables/libmodsecurity_la-time_mon.o variables/libmodsecurity_la-time_sec.o variables/libmodsecurity_la-time_wday.o variables/libmodsecurity_la-time_year.o variables/libmodsecurity_la-tx.o variables/libmodsecurity_la-variable.o .libs/libmodsecurity.lax/lt6-libmodsecurity_la-xml.o .libs/libmodsecurity.lax/libinjection.a/libinjection_html5.o .libs/libmodsecurity.lax/libinjection.a/libinjection_sqli.o .libs/libmodsecurity.lax/libinjection.a/libinjection_xss.o .libs/libmodsecurity.lax/libmbedtls.a/libmbedtls_la-base64.o .libs/libmodsecurity.lax/libmbedtls.a/libmbedtls_la-md5.o 
.libs/libmodsecurity.lax/libmbedtls.a/libmbedtls_la-sha1.o\r\ncorrupted double-linked list\r\n../libtool: line 1733: 84059 Aborted (core dumped) ar cr .libs/libmodsecurity.a parser/libmodsecurity_la-seclang-parser.o parser/libmodsecurity_la-seclang-scanner.o parser/libmodsecurity_la-driver.o libmodsecurity_la-transaction.o libmodsecurity_la-anchored_set_variable.o libmodsecurity_la-anchored_variable.o audit_log/libmodsecurity_la-audit_log.o audit_log/writer/libmodsecurity_la-writer.o audit_log/writer/libmodsecurity_la-https.o audit_log/writer/libmodsecurity_la-serial.o audit_log/writer/libmodsecurity_la-parallel.o libmodsecurity_la-modsecurity.o libmodsecurity_la-rules_set.o libmodsecurity_la-rules_set_phases.o libmodsecurity_la-rules_set_properties.o debug_log/libmodsecurity_la-debug_log.o debug_log/libmodsecurity_la-debug_log_writer.o libmodsecurity_la-run_time_string.o libmodsecurity_la-rule.o libmodsecurity_la-rule_unconditional.o libmodsecurity_la-rule_with_actions.o libmodsecurity_la-rule_with_operator.o libmodsecurity_la-rule_message.o libmodsecurity_la-rule_script.o libmodsecurity_la-unique_id.o libmodsecurity_la-rules_exceptions.o request_body_processor/libmodsecurity_la-multipart.o request_body_processor/libmodsecurity_la-xml.o request_body_processor/libmodsecurity_la-json.o actions/libmodsecurity_la-accuracy.o actions/libmodsecurity_la-action.o .libs/libmodsecurity.lax/lt1-libmodsecurity_la-audit_log.o actions/libmodsecurity_la-block.o actions/libmodsecurity_la-capture.o actions/libmodsecurity_la-chain.o actions/ctl/libmodsecurity_la-audit_log_parts.o actions/ctl/libmodsecurity_la-rule_engine.o actions/ctl/libmodsecurity_la-request_body_processor_json.o actions/ctl/libmodsecurity_la-request_body_processor_xml.o actions/ctl/libmodsecurity_la-request_body_processor_urlencoded.o actions/ctl/libmodsecurity_la-rule_remove_target_by_tag.o actions/ctl/libmodsecurity_la-rule_remove_target_by_id.o actions/ctl/libmodsecurity_la-rule_remove_by_id.o actions/ctl/libmodsecurity_la-rule_remove_by_tag.o actions/ctl/libmodsecurity_la-request_body_access.o actions/disruptive/libmodsecurity_la-allow.o actions/disruptive/libmodsecurity_la-deny.o actions/disruptive/libmodsecurity_la-drop.o actions/disruptive/libmodsecurity_la-redirect.o actions/disruptive/libmodsecurity_la-pass.o actions/libmodsecurity_la-exec.o actions/libmodsecurity_la-init_col.o actions/libmodsecurity_la-log.o actions/libmodsecurity_la-log_data.o actions/libmodsecurity_la-maturity.o actions/libmodsecurity_la-msg.o actions/libmodsecurity_la-multi_match.o actions/libmodsecurity_la-no_audit_log.o actions/libmodsecurity_la-no_log.o actions/libmodsecurity_la-phase.o actions/libmodsecurity_la-rev.o actions/libmodsecurity_la-rule_id.o actions/libmodsecurity_la-severity.o actions/libmodsecurity_la-set_env.o actions/libmodsecurity_la-set_rsc.o actions/libmodsecurity_la-set_sid.o actions/libmodsecurity_la-set_uid.o actions/libmodsecurity_la-set_var.o actions/data/libmodsecurity_la-status.o actions/libmodsecurity_la-skip.o actions/libmodsecurity_la-skip_after.o actions/libmodsecurity_la-tag.o actions/transformations/libmodsecurity_la-base64_decode.o actions/transformations/libmodsecurity_la-base64_encode.o actions/transformations/libmodsecurity_la-base64_decode_ext.o actions/transformations/libmodsecurity_la-cmd_line.o actions/transformations/libmodsecurity_la-compress_whitespace.o actions/transformations/libmodsecurity_la-css_decode.o actions/transformations/libmodsecurity_la-escape_seq_decode.o 
actions/transformations/libmodsecurity_la-hex_decode.o actions/transformations/libmodsecurity_la-hex_encode.o actions/transformations/libmodsecurity_la-html_entity_decode.o actions/transformations/libmodsecurity_la-js_decode.o actions/transformations/libmodsecurity_la-length.o actions/transformations/libmodsecurity_la-lower_case.o actions/transformations/libmodsecurity_la-md5.o actions/transformations/libmodsecurity_la-none.o actions/transformations/libmodsecurity_la-normalise_path.o actions/transformations/libmodsecurity_la-normalise_path_win.o actions/transformations/libmodsecurity_la-parity_even_7bit.o actions/transformations/libmodsecurity_la-parity_odd_7bit.o actions/transformations/libmodsecurity_la-parity_zero_7bit.o actions/transformations/libmodsecurity_la-remove_comments.o actions/transformations/libmodsecurity_la-remove_comments_char.o actions/transformations/libmodsecurity_la-remove_nulls.o actions/transformations/libmodsecurity_la-remove_whitespace.o actions/transformations/libmodsecurity_la-replace_comments.o actions/transformations/libmodsecurity_la-replace_nulls.o actions/transformations/libmodsecurity_la-sha1.o actions/transformations/libmodsecurity_la-sql_hex_decode.o actions/transformations/libmodsecurity_la-transformation.o actions/transformations/libmodsecurity_la-trim.o actions/transformations/libmodsecurity_la-trim_left.o actions/transformations/libmodsecurity_la-trim_right.o actions/transformations/libmodsecurity_la-upper_case.o actions/transformations/libmodsecurity_la-url_decode.o actions/transformations/libmodsecurity_la-url_decode_uni.o actions/transformations/libmodsecurity_la-url_encode.o actions/transformations/libmodsecurity_la-utf8_to_unicode.o actions/libmodsecurity_la-ver.o actions/libmodsecurity_la-xmlns.o engine/libmodsecurity_la-lua.o collection/libmodsecurity_la-collections.o collection/backend/libmodsecurity_la-in_memory-per_process.o collection/backend/libmodsecurity_la-lmdb.o operators/libmodsecurity_la-begins_with.o operators/libmodsecurity_la-contains.o operators/libmodsecurity_la-contains_word.o operators/libmodsecurity_la-detect_sqli.o operators/libmodsecurity_la-detect_xss.o operators/libmodsecurity_la-ends_with.o operators/libmodsecurity_la-eq.o operators/libmodsecurity_la-fuzzy_hash.o operators/libmodsecurity_la-ge.o operators/libmodsecurity_la-geo_lookup.o operators/libmodsecurity_la-gsblookup.o operators/libmodsecurity_la-gt.o operators/libmodsecurity_la-inspect_file.o operators/libmodsecurity_la-ip_match.o operators/libmodsecurity_la-ip_match_f.o operators/libmodsecurity_la-ip_match_from_file.o operators/libmodsecurity_la-le.o operators/libmodsecurity_la-lt.o operators/libmodsecurity_la-no_match.o operators/libmodsecurity_la-operator.o operators/libmodsecurity_la-pm.o operators/libmodsecurity_la-pm_f.o operators/libmodsecurity_la-pm_from_file.o operators/libmodsecurity_la-rbl.o operators/libmodsecurity_la-rsub.o operators/libmodsecurity_la-rx.o operators/libmodsecurity_la-rx_global.o operators/libmodsecurity_la-str_eq.o operators/libmodsecurity_la-str_match.o operators/libmodsecurity_la-validate_byte_range.o operators/libmodsecurity_la-validate_dtd.o operators/libmodsecurity_la-validate_hash.o operators/libmodsecurity_la-validate_schema.o operators/libmodsecurity_la-validate_url_encoding.o operators/libmodsecurity_la-validate_utf8_encoding.o operators/libmodsecurity_la-verify_cc.o operators/libmodsecurity_la-verify_cpf.o operators/libmodsecurity_la-verify_ssn.o operators/libmodsecurity_la-verify_svnr.o operators/libmodsecurity_la-within.o 
operators/libmodsecurity_la-unconditional_match.o utils/libmodsecurity_la-acmp.o utils/libmodsecurity_la-base64.o utils/libmodsecurity_la-decode.o .libs/libmodsecurity.lax/lt2-libmodsecurity_la-geo_lookup.o utils/libmodsecurity_la-https_client.o utils/libmodsecurity_la-ip_tree.o .libs/libmodsecurity.lax/lt3-libmodsecurity_la-md5.o utils/libmodsecurity_la-msc_tree.o utils/libmodsecurity_la-random.o utils/libmodsecurity_la-regex.o .libs/libmodsecurity.lax/lt4-libmodsecurity_la-sha1.o utils/libmodsecurity_la-string.o utils/libmodsecurity_la-system.o utils/libmodsecurity_la-shared_files.o variables/libmodsecurity_la-duration.o variables/libmodsecurity_la-env.o variables/libmodsecurity_la-highest_severity.o variables/libmodsecurity_la-modsec_build.o variables/libmodsecurity_la-remote_user.o .libs/libmodsecurity.lax/lt5-libmodsecurity_la-rule.o variables/libmodsecurity_la-time.o variables/libmodsecurity_la-time_day.o variables/libmodsecurity_la-time_epoch.o variables/libmodsecurity_la-time_hour.o variables/libmodsecurity_la-time_min.o variables/libmodsecurity_la-time_mon.o variables/libmodsecurity_la-time_sec.o variables/libmodsecurity_la-time_wday.o variables/libmodsecurity_la-time_year.o variables/libmodsecurity_la-tx.o variables/libmodsecurity_la-variable.o .libs/libmodsecurity.lax/lt6-libmodsecurity_la-xml.o .libs/libmodsecurity.lax/libinjection.a/libinjection_html5.o .libs/libmodsecurity.lax/libinjection.a/libinjection_sqli.o .libs/libmodsecurity.lax/libinjection.a/libinjection_xss.o .libs/libmodsecurity.lax/libmbedtls.a/libmbedtls_la-base64.o .libs/libmodsecurity.lax/libmbedtls.a/libmbedtls_la-md5.o .libs/libmodsecurity.lax/libmbedtls.a/libmbedtls_la-sha1.o\r\nmake[3]: *** [Makefile:1812: libmodsecurity.la] Error 134\r\nmake[3]: Leaving directory '/home/user/ModSecurity/src'\r\nmake[2]: *** [Makefile:3479: all-recursive] Error 1\r\nmake[2]: Leaving directory '/home/user/ModSecurity/src'\r\nmake[1]: *** [Makefile:1224: all] Error 2\r\nmake[1]: Leaving directory '/home/user/ModSecurity/src'\r\nmake: *** [Makefile:1038: all-recursive] Error 1\r\n```\r\n\r\nCould you advice please on how to debug, fix this?", + "summary": "**Debug the Build Issue on Debian Bullseye**\n\n1. **Identify the Problem**: The build process fails with a core dump error related to `ar` during the creation of the static library `libmodsecurity.a`. The error message indicates a \"corrupted double-linked list,\" which suggests a memory corruption issue.\n\n2. **Check Dependencies**: Ensure all required dependencies are installed:\n - Install `libtool`, `autoconf`, `automake`, `pkg-config`, `g++`, `make`, and other necessary libraries.\n - Verify that the optional dependencies like `GeoIP`, `libcurl`, and `libxml2` are correctly installed.\n\n3. **Examine the Environment**:\n - Check for environment variable settings that may affect the build. Ensure `CXX`, `CC`, and `LD` are correctly configured.\n - Use a clean environment, e.g., a Docker container, to isolate issues. \n\n4. **Reproduce the Issue**: Try to reproduce the error by executing the following commands:\n ```bash\n git clone --depth 1 -b v3/master --single-branch https://github.com/SpiderLabs/ModSecurity\n cd ModSecurity\n ./configure\n make\n ```\n\n5. **Run with Debug Information**: \n - Run `make` with `VERBOSE=1` to see detailed output of the build commands being executed. This might provide clues about where the failure occurs.\n\n6. **Use Debugging Tools**:\n - Use `gdb` to analyze the core dump file generated by `ar`. 
Run `gdb ar core`, where `core` is the generated core dump file, and inspect the backtrace with `bt` to gather information about the crash.\n - Check for memory leaks or problems using tools like `valgrind`:\n ```bash\n valgrind --track-origins=yes --leak-check=full make\n ```\n\n7. **Modify the Build Process**:\n - If `./build.sh` is optional as per the comment in the issue, consider skipping it and directly running `./configure` and then `make`.\n - Manually clean the previous build artifacts before re-running the build process:\n ```bash\n make clean\n ```\n\n8. **Check System Compatibility**:\n - Ensure the version of `libtool` and other build tools are compatible with the ModSecurity version you are attempting to build.\n - If the issue persists, consider testing on a different version of Debian or on a different OS to see if the problem is specific to Debian Bullseye.\n\n9. **Seek Help**: If the above steps do not resolve the issue, consider reporting the problem with detailed logs and steps taken to the ModSecurity GitHub repository or seek assistance in community forums.\n\nBy following these steps, you should be able to identify the root cause of the build failure and take appropriate actions to resolve it.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2613", @@ -61533,6 +63674,7 @@ "node_id": "MDU6SXNzdWU5OTI2MTY1NTQ=", "title": "Broken Examples Between v3.04 and v3.05 + master", "body": "**Describe the bug**\r\n\r\nHi,\r\n\r\nWhile trying to run the example `simple_request` (under `examples/using_bodies_in_chunks/`), I noticed that it works fine under 3.04 but breaks under 3.05. The build steps are the same in both. Here is the difference in output. First, we have the working version (v3.04):\r\n\r\n```\r\n# ./simple_request example.conf\r\n[163121835441.179979] [] [4] Initializing transaction\r\n[163121835441.179979] [] [4] Transaction context created.\r\n[163121835441.179979] [] [4] Starting phase CONNECTION. (SecRules 0)\r\n[163121835441.179979] [] [9] This phase consists of 0 rule(s).\r\n[163121835441.179979] [] [4] Starting phase URI. (SecRules 0 + 1/2)\r\n[163121835441.179979] [/test.pl?param1=test¶2=test2] [4] Adding request argument (GET): name \"param1\", value \"test\"\r\n[163121835441.179979] [/test.pl?param1=test¶2=test2] [4] Adding request argument (GET): name \"para2\", value \"test2\"\r\n...\r\n[163121835441.179979] [/test.pl?param1=test¶2=test2] [8] Request was relevant to be saved. Parts: 4430\r\n```\r\n\r\nand then on v3.05\r\n\r\n```\r\n# ./simple_request example.conf\r\nProblems loading the rules...\r\nRules error. File: /tmp/1/ModSecurity/examples/using_bodies_in_chunks/.libs/lt-simple_request. Line: 1. Column: 8. Invalid input: ELF\r\n```\r\n\r\nI get the same error regardless of the rule file used. The same broken behavior is seen in master as well\r\n\r\nIn the above example, I'm using the default `example.conf` that comes with the example:\r\n\r\n```\r\nSecDebugLog /dev/stdout\r\nSecDebugLogLevel 9\r\nSecRule RESPONSE_BODY \"/soap:Body\" \"id:1,phase:5,deny\"\r\n```\r\n\r\nThanks\r\n\r\n**Logs and dumps**\r\n\r\nSee above\r\n\r\n**To Reproduce**\r\n\r\nSee above\r\n\r\n**Expected behavior**\r\n\r\nSee above\r\n\r\n**Additional context**\r\n\r\nSee above", + "summary": "**Investigate Broken Examples in v3.05 and master**\n\n1. **Identify the Issue**: Notice that the `simple_request` example fails in version 3.05 while it operates correctly in version 3.04. 
The error message indicates problems with loading rules, specifically citing an \"Invalid input: ELF\" error.\n\n2. **Compare Builds**: Ensure that the build steps for both versions (3.04 and 3.05) are identical to eliminate discrepancies that could affect functionality.\n\n3. **Check Rule Files**: Observe that the error persists across different rule files. Investigate the contents of the rule files, particularly the one used in `example.conf`, which should be valid and compatible with the new version.\n\n4. **Debug Logging**: Utilize the `SecDebugLog` and `SecDebugLogLevel` settings in the `example.conf` to collect detailed logs of the execution process to better understand where the failure occurs.\n\n5. **Reproduce the Error**: Run the command `./simple_request example.conf` to replicate the error, confirming it is consistent across environments.\n\n6. **Examine Changes Between Versions**: Review the changelog or commit history between v3.04 and v3.05 for modifications related to rule handling or initialization processes that could contribute to the failure.\n\n7. **First Steps to Resolve**:\n - Update or modify the rule files to ensure they are not causing compatibility issues.\n - Test the example with a minimal rule set to isolate the problem.\n - If the issue persists, consider reverting to v3.04 temporarily while investigating further.\n\n8. **Engage with Community**: If initial troubleshooting does not reveal a solution, report findings on the GitHub issue tracker, detailing the steps taken and inviting collaboration for resolution.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2618", @@ -61561,6 +63703,7 @@ "node_id": "I_kwDOABQmks48C61w", "title": "Modsecurity sees the HTTP \"INVALID\" method instead of \"PATCH\"", "body": "Hello,\r\n\r\nI installed modsecurity 2 on IIS 10 in reverse-proxy with the OWASP csr. \r\n\r\nFor the moment, the WAF works in DetectionOnly and therefore does not block anything. I authorized in the csr-setup.conf the HTTP **PATCH** method, but I see that when the PATCH method is used, it is blocked by Modsecurity because PATCH is replaced by **INVALID**, but the application works correctly, because IIS is authorized to use PATCH and in the IIS logs, it is well written PATCH. I don't understand why Modsecurity receives INVALID. \r\n\r\nCould you please help me?\r\n\r\n\r\nhave a nice day\r\n\r\nKP\r\n", + "summary": "Investigate the issue where ModSecurity is interpreting the HTTP PATCH method as INVALID. You are using ModSecurity 2 on IIS 10 configured as a reverse proxy with the OWASP Core Rule Set (CRS). \n\n1. Verify the ModSecurity configuration to ensure that the HTTP PATCH method is explicitly allowed. Check the `csr-setup.conf` file for any rules that might inadvertently block PATCH requests.\n\n2. Examine the ModSecurity logs to identify any rules or conditions that trigger the INVALID method response. Look for specific error messages or anomalies that occur when a PATCH request is made.\n\n3. Check the IIS configuration to confirm that it correctly handles PATCH requests. Ensure that there are no conflicting settings that might affect how ModSecurity interprets incoming requests.\n\n4. Test ModSecurity in a different mode (e.g., Block or Alert) to see if the behavior changes with PATCH requests. This can provide more insight into how ModSecurity processes the requests.\n\n5. 
Update ModSecurity to the latest version if not already done, as newer releases may contain bug fixes or improvements related to HTTP method handling.\n\n6. Consider enabling debug logging in ModSecurity for more detailed output during PATCH requests. This can help pinpoint the source of the issue.\n\n7. Review the OWASP CRS documentation to determine if there are known issues or specific configurations related to HTTP methods that could impact processing.\n\nBy following these steps, you can better understand why ModSecurity sees PATCH as INVALID and work towards resolving the issue.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2623", @@ -61576,8 +63719,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 401 + 401, + 398 ] } }, @@ -61590,6 +63733,7 @@ "node_id": "I_kwDOABQmks4_L6nE", "title": "ModSecurity should able to analyse gRPC request body.", "body": "**Expected behavior**\r\n\r\nModSecurity should able to analyse gRPC request body.\r\n\r\ngRPC is binary protocol, ModSecurity is not able to parse it and hence, not able to block simple injection.", + "summary": "Implement gRPC request body analysis in ModSecurity. \n\n1. Modify the ModSecurity parser to handle binary data formats used in gRPC.\n2. Create a decoder for gRPC messages that can convert binary data into a readable format.\n3. Integrate the decoder into the ModSecurity inspection engine to allow rule matching against decoded gRPC request bodies.\n4. Test the implementation with various gRPC payloads to ensure proper detection of injection attacks.\n5. Document the changes and update the ModSecurity ruleset to include examples for gRPC request validations.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2645", @@ -61618,6 +63762,7 @@ "node_id": "I_kwDOABQmks5ByeOf", "title": "Rules with both chain and multiMatch may produce excess writes to audit log", "body": "If a chained rule has at least one rule that includes the multiMatch action **and** the rule with the multiMatch action is not the final rule in the chain, then writes to the audit log may occur even if not all rules within the chain resulted in a match.\r\n\r\nFor example, with ```SecAuditEngine RelevantOnly``` and the following chained rule:\r\n```\r\nSecRule ARGS \"@contains y0\" \"id:1001,phase:2,t:urlDecode,t:lowercase,multimatch,log,deny,status:403,chain\"\r\nSecRule ARGS \"@contains y1\" \"t:none\"\r\n```\r\n\r\nIn this case, if only the first rule of the two-rule chain matches, as in this request\r\n\r\nand with this request: ```curl http://localhost/testget.php?a=y0```\r\n\r\n... then a write to the audit log can still occur.\r\n\r\nThe transaction is (correctly) not denied, and no other ill effects have been observed.\r\n\r\nThis has been confirmed to be longstanding behaviour in ModSecurity v3 (at least as far back as v3.0.3) rather than a regression.\r\n", + "summary": "Identify the issue of excess writes to the audit log when using chained rules with multiMatch in ModSecurity. Recognize that if a chained rule contains a multiMatch action and is not the final rule, it may lead to unnecessary audit log entries even when not all rules match.\n\nTo reproduce the problem, use the example chained rule:\n```\nSecRule ARGS \"@contains y0\" \"id:1001,phase:2,t:urlDecode,t:lowercase,multimatch,log,deny,status:403,chain\"\nSecRule ARGS \"@contains y1\" \"t:none\"\n```\nExecute a request like `curl http://localhost/testget.php?a=y0`. 
Observe that despite only the first rule matching, an entry is still written to the audit log.\n\nConfirm that this behavior has been persistent in ModSecurity v3 since at least version 3.0.3, indicating it's not a regression.\n\nTo address the problem, consider the following first steps:\n1. Review the logic of how audit logs are written when using chained rules with multiMatch.\n2. Assess whether modifications to the rule processing order or conditions can prevent unnecessary log entries.\n3. Explore potential updates or configurations within ModSecurity to mitigate this behavior.\n4. Develop a test plan to verify the fix across various scenarios involving chained rules and multiMatch actions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2672", @@ -61646,6 +63791,7 @@ "node_id": "I_kwDOABQmks5EFGHW", "title": "SecAuditLogParts never logs anything for 'K' type (list of rule IDs)", "body": "**Describe the bug**\r\n\r\nI want to analyse POST body content but I don't want to print any POST body content (or any actual data snippets from the request, such as cookies etc) to the log file. So I don't want to log 'B', 'C', 'F', 'H', 'J' etc.\r\n\r\nI just want to get the time of the event and the rule ID that tripped the blocked request.\r\n\r\nI believe the 'K' type in SecAuditLogParts is meant to log the rule ID\r\n\r\nhttps://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual-(v3.x)#SecAuditLogParts\r\n\r\nHowever, in all my logs, the 'K' is always empty. The only meaningful data I can get about the rules that tripped is from 'H', but it contains too many other messages in that log entry such as personal data, which I can't put in logs.\r\n\r\n\r\n**Logs and dumps**\r\n\r\nExample:\r\n\r\n```\r\n---0s7Btvlo---A--\r\n[17/Feb/2022:02:39:54 +0000] 1645065594 10.42.0.11 0 172.17.0.3 80\r\n\r\n---0s7Btvlo---K--\r\n\r\n---0s7Btvlo---Z--\r\n```\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n\r\nSet SecAuditLogParts to 'AKZ' and you'll never get entries for 'K'.\r\n\r\n\r\n**Expected behavior**\r\n\r\nI should get the rule ID or something like that (? since it's never worked, I don't know what it looks like) when logging K.\r\n\r\n**Server**\r\n - ModSecurity version: libmodsecurity 3.0.6 (commit hash c3d7f4b560797a052681dcffb97a22bb906487cd), ModSecurity-nginx v1.0.2\r\n - WebServer Nginx 1.14.0 (the official debian package 1.14.0-0ubuntu1.9 from on Ubuntu 18.04)\r\n - OS (and distro): Ubuntu Linux 18.04 LTS\r\n\r\n\r\n**Rule Set**\r\n - public coreruleset 3.3.2\r\n", + "summary": "Investigate the issue where SecAuditLogParts does not log 'K' type entries for rule IDs. \n\n1. Confirm the correct configuration for `SecAuditLogParts` is set to 'AKZ'. This is necessary for logging the rule IDs.\n2. Verify the ModSecurity version in use is libmodsecurity 3.0.6 and ensure it's compatible with the public Core Rule Set version 3.3.2.\n3. Check the ModSecurity and Nginx integration compatibility, especially with Nginx version 1.14.0 on Ubuntu 18.04 LTS.\n4. Examine the ModSecurity logs and see if any filtering or processing might be preventing 'K' type entries from being written.\n5. Review the ModSecurity documentation regarding SecAuditLogParts to ensure that the expected behavior of 'K' is correctly understood. \n\nCreate test scenarios where different types of requests trigger various rules, and monitor the logs to see if any 'K' entries are created. 
If entries are still missing, consider enabling verbose logging to gather more details about the request handling process. \n\nLog findings and document any discrepancies between expected and actual behavior to facilitate further troubleshooting or bug reporting.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2684", @@ -61674,6 +63820,7 @@ "node_id": "I_kwDOABQmks5FoL4G", "title": "Unable to turn off writing to the server log even set to `nolog,noauditlog`", "body": "**Describe the bug**\r\nI have set this to return status 418 but i do not want any logs inside the nginx\r\n \r\n```\r\nSecRule REQUEST_HEADERS\"@contains vip_checking\" \"phase:2,id:70010,deny,nolog,noauditlog,status:418\"\r\n```\r\n\r\nBut it somehow show error log in nginx.\r\n\r\n```\r\n*441580 [client 10.42.0.1] ModSecurity: Access denied with code 418 (phase 2). Matched \"Operator `Contains' with parameter `vip_checking' against variable\r\n```", + "summary": "Disable logging for ModSecurity rules in Nginx configuration. Ensure the rule for returning status 418 does not generate logs. \n\nCheck the following configuration:\n```nginx\nSecRule REQUEST_HEADERS \"@contains vip_checking\" \"phase:2,id:70010,deny,nolog,noauditlog,status:418\"\n```\n\nVerify that `nolog` and `noauditlog` are correctly applied. \n\nInvestigate the Nginx error log output:\n```\n*441580 [client 10.42.0.1] ModSecurity: Access denied with code 418 (phase 2). Matched \"Operator `Contains' with parameter `vip_checking' against variable\n```\n\nThis might indicate that the logging settings are overridden elsewhere in the configuration. \n\nPossible first steps:\n1. Review the global ModSecurity configuration file for any conflicting log settings.\n2. Ensure that all ModSecurity rules are set correctly to suppress logging.\n3. Test the configuration changes by restarting Nginx and checking the logs to confirm that no entries are recorded for the specified rule.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2698", @@ -61702,6 +63849,7 @@ "node_id": "I_kwDOABQmks5FzJj-", "title": "feature request: cache transformed value", "body": "In our case (180 rules), after cache, the performance increase about 5% - 10%.\r\n\r\nCurrently, we use a map on Transactions, and the key is key + path.", + "summary": "Implement a caching mechanism for transformed values to enhance performance, specifically for instances involving multiple rules (180 in this case). Aim for a performance improvement of 5% to 10% post-caching.\n\n1. Analyze the current implementation of the map on Transactions, identifying how keys are generated using \"key + path.\"\n2. Design a caching strategy that stores the transformed values based on the same key structure.\n3. Integrate the cache into the existing codebase, ensuring it retrieves values from the cache before performing the transformation.\n4. Establish cache invalidation or expiration policies to maintain data integrity.\n5. Benchmark the performance before and after the implementation to validate the improvement.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2702", @@ -61730,6 +63878,7 @@ "node_id": "I_kwDOABQmks5F_Zb9", "title": "Cannot get REQUEST_FILENAME with %3f (encoded ?) in url path", "body": "**Describe the bug**\r\n\r\nIf the url path contains %3f, cannot get real `REQUEST_FILENAME`.\r\n\r\n\r\n\r\n\r\n**Logs and dumps**\r\n\r\n```\r\nModSecurity: Warning. 
Matched \"Operator `Gt' with parameter `0' against variable `REQUEST_URI_RAW' (Value: `/path1%3fpath2?query=%3f' ) [file \"...\"] [line \"1\"] [id \"1\"] [rev \"\"] [msg \"\"] [data \"\"] [severity \"0\"] [ver \"\"] [maturity \"0\"] [accuracy \"0\"] [hostname \"127.0.0.1\"] [uri \"/path1\"] [unique_id \"...\"] [ref \"v4,24t:length\"]\r\nModSecurity: Warning. Matched \"Operator `Gt' with parameter `0' against variable `REQUEST_FILENAME' (Value: `/path1' ) [file \"...\"] [line \"2\"] [id \"2\"] [rev \"\"] [msg \"\"] [data \"\"] [severity \"0\"] [ver \"\"] [maturity \"0\"] [accuracy \"0\"] [hostname \"127.0.0.1\"] [uri \"/path1\"] [unique_id \"...\"] [ref \"v4,14t:length\"]\r\n```\r\n\r\n**To Reproduce**\r\n\r\nTest for path: `/path1%3fpath2?query=%3f`\r\n\r\n**Expected behavior**\r\n\r\nReturn urldecoded or original filename.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): ModSecurity v3.0.6\r\n\r\n**Rule Set (please complete the following information):**\r\n\r\n```\r\nSecRule REQUEST_URI_RAW \"@gt 0\" \"id:1,phase:1,t:length,pass,log,auditlog\"\r\nSecRule REQUEST_FILENAME \"@gt 0\" \"id:2,phase:1,t:length,pass,log,auditlog\"\r\n```\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n", + "summary": "Investigate issue with `REQUEST_FILENAME` when URL contains encoded question mark `%3f`. \n\n**Steps to reproduce the problem:**\n1. Access the URL: `/path1%3fpath2?query=%3f`.\n2. Observe that `REQUEST_FILENAME` does not return the expected decoded value.\n\n**Expected Outcome:**\nEnsure that the server returns the decoded or original filename for the request.\n\n**Technical Details:**\n- ModSecurity version: 3.0.6.\n- Current rules:\n ```\n SecRule REQUEST_URI_RAW \"@gt 0\" \"id:1,phase:1,t:length,pass,log,auditlog\"\n SecRule REQUEST_FILENAME \"@gt 0\" \"id:2,phase:1,t:length,pass,log,auditlog\"\n ```\n- Logs indicate that `REQUEST_URI_RAW` and `REQUEST_FILENAME` are matched against conditions but do not yield the correct filename due to encoding issues.\n\n**Initial Steps to Address the Problem:**\n1. Review ModSecurity documentation related to URL decoding and request handling.\n2. Modify rules to handle encoded characters correctly, possibly by adding a decoding transformation.\n3. Test changes with the provided URL to verify if `REQUEST_FILENAME` returns the expected output.\n4. Log the output of `REQUEST_FILENAME` after modifying the rules to confirm behavior.\n5. Consider adding unit tests to cover various encoded characters in URLs to prevent regressions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2705", @@ -61756,6 +63905,7 @@ "node_id": "I_kwDOABQmks5IWpYk", "title": "feature request: show action payload in debug log, like ctl:xxxx", "body": "For `ctl`, there is only a name without payload.\r\n\r\nJust change\r\n\r\n```\r\n\"action: \" + *a->m_name.get());\r\n```\r\n\r\nto \r\n\r\n```\r\n\"action: \" + *a->m_name.get() + (a->m_parser_payload.empty() ? \"\" : \":\" + a->m_parser_payload));\r\n```", + "summary": "Add a feature to display action payload in the debug log for actions like `ctl:xxxx`. Modify the logging statement to include the payload if it's not empty. \n\n1. Locate the current logging code:\n```cpp\n\"action: \" + *a->m_name.get());\n```\n2. Update it to:\n```cpp\n\"action: \" + *a->m_name.get() + (a->m_parser_payload.empty() ? \"\" : \":\" + a->m_parser_payload));\n```\n3. 
Test the changes to ensure that the payload is correctly displayed in the debug logs for all relevant actions. \n4. Review other parts of the code that may need similar updates for consistency.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2729", @@ -61782,6 +63932,7 @@ "node_id": "I_kwDOABQmks5MG3E3", "title": "Configuration directive SecUploadFileLimit is exclusive", "body": "**Describe the bug**\r\n\r\nThe directive `SecUploadFileLimit` is exclusive, so when we expect to have a maximum of 10 files, we need to put 11 as value.\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n\r\nJust set to directive `SecUploadFileLimit` to a value 10.\r\nTry to upload 10 files and modsecurity will block the request because the limit is reached.\r\n\r\n**Expected behavior**\r\n\r\nIf the limit is set to 10, I expect to be able to upload 10 files.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): ModSecurity v3.0.6\r\n - WebServer: nginx-1.20.2\r\n - OS (and distro): Linux, Ubuntu with Docker\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? Public\r\n\r\nIt might be an expected behavior, but I can't find anywhere that this directive is exclusive.", + "summary": "Identify and clarify the behavior of the `SecUploadFileLimit` directive in ModSecurity, which appears to be exclusive. \n\n1. **Reproduce the issue**:\n - Set `SecUploadFileLimit` to 10.\n - Attempt to upload 10 files.\n - Observe that ModSecurity blocks the request, indicating the limit is reached.\n\n2. **Clarify expected behavior**: \n - Users expect that setting `SecUploadFileLimit` to 10 allows for the upload of 10 files, rather than 9.\n\n3. **Check documentation**: \n - Review the ModSecurity documentation to confirm if the exclusivity of the `SecUploadFileLimit` directive is explicitly stated. \n\n4. **Propose a potential fix**:\n - Consider adjusting the directive to allow for the expected number of files (i.e., setting it to 10 permits 10 uploads).\n\n5. **Evaluate impact on existing configurations**:\n - Test changes in a controlled environment to ensure they do not inadvertently affect other functionalities or security measures.\n\n6. **Document findings and update the issue**:\n - Provide a clear explanation in the GitHub issue regarding the expected behavior vs. the current functionality, including any references to documentation or community discussions about this directive.\n\n7. **Engage with the community**:\n - Seek input from other users or contributors who may have encountered similar issues or have insights regarding the directive's implementation. 
\n\nBy following these steps, you can systematically address the reported behavior of the `SecUploadFileLimit` directive.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2766", @@ -61810,6 +63961,7 @@ "node_id": "I_kwDOABQmks5N1l1e", "title": "configure with --with-lua= / parameter seems to be not used", "body": "**Describe the bug**\r\nWhen compiling with a provided, local compiled LUA (5.4.4), the --with-lua=/data/lua-5.4.4 seems to be ignored and results in \"not found\"\r\n-> it does not seem to look at the path provided\r\n\r\n**Logs and dumps**\r\n...\r\nconfigure: LUA library found at: /usr/lib64//liblua-5.3.so\r\n*** LOOKING AT PATH: /opt/local\r\nconfigure: LUA library found at: /usr/lib64//liblua-5.3.so\r\n*** LOOKING AT PATH: /usr/lib/lua5.3/liblua\r\nconfigure: LUA library found at: /usr/lib64//liblua-5.3.so\r\n*** LOOKING AT PATH: /usr/lib/lua5.2/liblua\r\nconfigure: LUA library found at: /usr/lib64//liblua-5.3.so\r\nconfigure: LUA library was not found\r\n...\r\n-> no --with-lua=/data/lua-5.4.4 PATH inspected/used\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nconfigure --with-lua=/some/path \r\n\r\n**Expected behavior**\r\n*** LOOKING AT PATH: /some/path \r\nfinding LUA there\r\n\r\n\r\nA clear and concise description of what you expected to happen.\r\nExpecting to find and use locally self-compiled LUA (5.4.4)\r\n\r\n**Server (please complete the following information):**\r\nModSecurity v3.0.7\r\nRHEL8\r\n\r\nInitially i thought that the issue is that Lua doesnt build shared libs (.so) -> but even when building with shared libs (using patch from https://www.linuxfromscratch.org/patches/blfs/svn/lua-5.4.4-shared_library-1.patch) -> the behaviour does not change. \r\n", + "summary": "Investigate the issue with the `--with-lua=/` parameter not being recognized during the configuration process. \n\n1. **Reproduce the Issue**: Use the command `./configure --with-lua=/data/lua-5.4.4` to trigger the problem and observe the configuration logs. Confirm that the expected Lua path is not being inspected.\n\n2. **Check Configuration Logic**: Review the `configure` script to ensure that it correctly parses the `--with-lua` parameter. Look for sections in the script that handle the detection of Lua libraries. \n\n3. **Inspect Library Paths**: Verify that the local Lua installation at `/data/lua-5.4.4` includes shared libraries. Check the presence of `liblua.so` in the expected directories.\n\n4. **Test Shared Library Build**: If the issue persists, ensure that Lua is built with shared libraries enabled. Apply the patch from [Linux From Scratch](https://www.linuxfromscratch.org/patches/blfs/svn/lua-5.4.4-shared_library-1.patch) to build shared libraries and verify the output.\n\n5. **Modify Configure Script**: If needed, adjust the `configure` script to explicitly check the provided `--with-lua` path for library files. Add debug output statements to track the library search process.\n\n6. **Verify Environment Variables**: Check if any environment variables (e.g., `LUA_DIR`, `LUA_LIB`) may be interfering with the configuration process. 
\n\nBy following these steps, you can identify and address the issue with the Lua library detection in the configure script.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2773", @@ -61838,6 +63990,7 @@ "node_id": "I_kwDOABQmks5OmIQb", "title": "Repeated headers cause problems in json audit logs", "body": "**Describe the bug**\r\n\r\nCurrent implementation of json audit logs generates log entries with duplicated keys in the event of repeated headers in either the request or response. This is _technically_ not _invalid_ json per [RFC 7159 Section 4](https://datatracker.ietf.org/doc/html/rfc7159#section-4), but most implementations silently drop all-but-first or all-but-last instances of duplicated keys. In [some cases](https://javadoc.io/static/com.google.protobuf/protobuf-java-util/3.11.1/com/google/protobuf/util/Structs.html) however, the behavior is undefined and an error is thrown. I found [this issue](https://github.com/SpiderLabs/ModSecurity-nginx/issues/230) in the nginx repo outlining a similar difficulty. The prevailing idea over there seemed to be to convert repeated header fields into a single array value containing all of the previously enumerated values. The HTTP specification states in [RFC 9110 Section 5.2](https://datatracker.ietf.org/doc/html/rfc9110#section-5.2) that field values for repeated headers should be able to be concatenated delimited by commas. I think either of these are reasonable solutions with pros and cons...\r\n\r\n*Arrays*\r\nPros:\r\n- Can easily handle different data types\r\n\r\nCons:\r\n- Less aligned with typical representations of multiple values for a single header\r\n\r\n*Comma-delimited concatenated strings*\r\nPros:\r\n- Part of RFC spec for HTTP\r\n\r\nCons:\r\n- Really only applies to string values\r\n\r\n**To Reproduce**\r\n\r\n``` curl\r\ncurl -H \"test: value\" -H \"test: another-value\" \"https://\"\r\n```\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): ModSecurity v3.0.4 with envoy connector v0.1.0\r\n - WebServer: envoy 1.19\r\n - OS (and distro): linux ubuntu\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? OWASP_CRS\r\n - What is the version number? 3.2.0\r\n\r\n", + "summary": "Address the issue of duplicated keys in JSON audit logs caused by repeated headers in requests or responses. Ensure that the current implementation handles these scenarios correctly, as they can lead to undefined behavior in some systems.\n\n### Proposed Solutions:\n1. **Convert repeated header fields into an array**:\n - **Pros**: Can handle different data types easily.\n - **Cons**: May not align with common practices for representing multiple values for a single header.\n\n2. **Use comma-delimited concatenated strings**:\n - **Pros**: Aligns with the HTTP specification (RFC 9110).\n - **Cons**: Limited to string values.\n\n### First Steps to Tackle the Problem:\n1. Review the current JSON log generation implementation to identify how headers are processed.\n2. Determine the preferred solution (array or comma-delimited strings) based on the project's needs and specifications.\n3. Implement the chosen approach to handle repeated headers:\n - If opting for arrays, modify the log generation logic to aggregate values into an array structure.\n - If opting for comma-delimited strings, concatenate repeated header values accordingly.\n4. 
Test the implementation using the provided `curl` command to ensure that repeated headers are logged correctly.\n5. Validate the changes with existing unit tests and add new tests to cover edge cases involving repeated headers.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2777", @@ -61866,6 +64019,7 @@ "node_id": "I_kwDOABQmks5PFUtn", "title": "Sanitize XML (same as #360 and #587)", "body": "Hello,\r\n\r\nFirst, thanks you for all you did and do. ModSecurity is a great project.\r\n\r\nLike https://github.com/SpiderLabs/ModSecurity/issues/360 and https://github.com/SpiderLabs/ModSecurity/issues/587, we need to sanitize XML content.\r\n\r\nAfter hours of searches and tries, it's a fail. So here i am to grag some help if there is a solution - or a workarround - we can pay for it if needed.\r\n\r\nWe use native version of ModSecurity for Debian, so 2.9.3 on Buster and Bullseye.\r\n\r\nThis (xmlrpc.php for Wordpress) : \r\n\r\n```\r\n--90fbbb2d-C--\r\nwp.getOptions0rootmy-password\r\n--90fbbb2d-F--\r\n```\r\n\r\nShould be sanitized like this : \r\n\r\n```\r\n--90fbbb2d-C--\r\nwp.getOptions0root***********\r\n--90fbbb2d-F--\r\n```\r\n\r\n`To iterate your tests : curl -X POST -H 'Content-Type: text/xml' -d 'wp.getUsersBlogsadminmy-password' https://your-link`\r\n\r\nIt concerns as well SOAP transactions.\r\n\r\nHere is some tries we did (and fails) : \r\n\r\n\r\nSecRule REQUEST_HEADERS:Content-Type \"text/xml\" \"id:'2100002',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML\"\r\n\r\n#SecRule XML:*/string/text() \"^(.*)$\" \"id:1280007,phase:5,nolog,pass,sanitiseMatched\"\r\n#SecRule XML:*string/text() \"^(.*)$\" \"id:1280008,phase:5,nolog,pass,sanitiseMatched\"\r\n#SecRule XML:string/text() \"^(.*)$\" \"id:1280009,phase:5,nolog,pass,sanitiseMatched\"\r\n#SecRule XML:string/ \"^(.*)$\" \"id:1280010,phase:5,nolog,pass,sanitiseMatched\"\r\n#SecRule XML:string \"^(.*)$\" \"id:1280011,phase:5,nolog,pass,sanitiseMatched\"\r\n\r\nThanks you for your time.\r\n\r\nKind regards,\r\n\r\nVincent\r\n\r\n", + "summary": "Sanitize XML content in ModSecurity. Refer to issues #360 and #587 for context. The goal is to modify XML requests, specifically for WordPress XML-RPC, to mask sensitive information like passwords.\n\n1. Identify XML payloads in requests. Use the following structure for incoming XML data:\n ```xml\n wp.getOptions0rootmy-password\n ```\n\n2. Transform the XML to mask the password, resulting in:\n ```xml\n wp.getOptions0root***********\n ```\n\n3. Use cURL to test the XML sanitization:\n ```bash\n curl -X POST -H 'Content-Type: text/xml' -d 'wp.getUsersBlogsadminmy-password' https://your-link\n ```\n\n4. Review and modify existing rules:\n - Start with:\n ```apache\n SecRule REQUEST_HEADERS:Content-Type \"text/xml\" \"id:'2100002',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML\"\n ```\n - Experiment with sanitizing rules for XML:\n ```apache\n #Example rules that need tweaking\n SecRule XML:*/string/text() \"^(.*)$\" \"id:1280007,phase:5,nolog,pass,sanitiseMatched\"\n ```\n\n5. Test and refine these rules in a development environment. Ensure proper handling of SOAP transactions as well.\n\n6. Engage with the community or consider hiring expertise if needed to expedite the process. 
\n\nBy following these steps, start tackling the XML sanitization issue in ModSecurity.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2779", @@ -61881,8 +64035,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 414 + 414, + 398 ] } }, @@ -61895,6 +64049,7 @@ "node_id": "I_kwDOABQmks5PieL4", "title": "NULL pointer", "body": "In modsec_var_log_handler(), ther's a check for msr being NULL:\r\n if (msr == NULL) return NULL;\r\n return construct_single_var(msr, name);\r\n\r\nconstruct_single_var() uses msr->msc_rule_mptmp, so this should also be checked:\r\n if (msr->msc_rule_mptmp == NULL) return NULL;\r\n", + "summary": "Check for NULL pointers in `modsec_var_log_handler()`. Currently, the function checks if `msr` is NULL and returns NULL if it is. However, before calling `construct_single_var(msr, name)`, also validate `msr->msc_rule_mptmp`. If `msc_rule_mptmp` is NULL, return NULL to prevent potential dereferencing issues.\n\nFollow these steps to implement the fix:\n\n1. Modify the `modsec_var_log_handler()` function to include a check for `msr->msc_rule_mptmp` after verifying that `msr` is not NULL.\n2. Implement the check as follows:\n ```c\n if (msr == NULL || msr->msc_rule_mptmp == NULL) return NULL;\n ```\n3. Test the changes thoroughly to ensure that the function behaves correctly when both `msr` and `msr->msc_rule_mptmp` are NULL.\n4. Review any related code in the module to see if similar checks are necessary elsewhere.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2782", @@ -61923,6 +64078,7 @@ "node_id": "I_kwDOABQmks5RXLC_", "title": "Handle strange Windows usernames", "body": "To cope with strange characters in Windows usernames, \r\nin /iis/wix/list_dependencies.bat\r\nline 25 should be quoted: \r\n`SET log_file=\"%TEMP%\"\\ModSecurityIIS-depedencies-%log_file:~-5%.TXT`\r\n", + "summary": "Fix handling of unusual Windows usernames by modifying the `list_dependencies.bat` script. Quote the log file path on line 25 to ensure proper handling of special characters. Update the line to:\n\n```bat\nSET log_file=\"%TEMP%\\ModSecurityIIS-depedencies-%log_file:~-5%.TXT\"\n```\n\nBegin by locating the `list_dependencies.bat` file in the `/iis/wix/` directory. Open the file and navigate to line 25. Replace the existing line with the corrected version. 
Test the changes with usernames containing special characters to ensure compatibility.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2798", @@ -61938,8 +64094,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 401 + 401, + 398 ] } }, @@ -61952,6 +64108,7 @@ "node_id": "I_kwDOABQmks5TTCo4", "title": "Bug: Premature EOF in JSON leads to status 500 instead of REQBODY_ERROR", "body": "**Describe the bug**\r\n\r\nInstead of failing safely, ModSec triggers a status 500.\r\n\r\n```\r\n$ cat example.json\r\n{ \"id\" : \"123\"\r\n$ curl -v http://localhost -H \"Content-Type: application/json\" -d @example.json\r\n...\r\n< HTTP/1.1 500 Internal Server Error\r\n...\r\n```\r\n\r\n**Logs and dumps**\r\n\r\n```\r\n[2022-10-05 11:17:58.926606] [-:error] 127.0.0.1:39976 Yz1Lxuf_TBfAeTSTcu5oFwAAAAk [client 127.0.0.1] ModSecurity: JSON parser error: parse error: premature EOF\\n [hostname \"localhost\"] [uri \"/\"] [unique_id \"Yz1Lxuf_TBfAeTSTcu5oFwAAAAk\"]\r\n[2022-10-05 11:17:58.926624] [-:error] 127.0.0.1:39976 Yz1Lxuf_TBfAeTSTcu5oFwAAAAk [client 127.0.0.1] ModSecurity: JSON parser error: parse error: premature EOF\\n [hostname \"localhost\"] [uri \"/\"] [unique_id \"Yz1Lxuf_TBfAeTSTcu5oFwAAAAk\"]\r\n[2022-10-05 11:17:58.926629] [core:trace3] 127.0.0.1:39976 Yz1Lxuf_TBfAeTSTcu5oFwAAAAk fixups hook gave 500: /\r\n```\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): ModSec 2.9.6 \r\n - WebServer: Apache 2.5.54\r\n\r\n**Additional information:**\r\n\r\nI played around with broken JSON payloads following a customer request. This was the only case where I could make ModSecurity stumble.", + "summary": "**Fix JSON Parsing Error Handling in ModSecurity**\n\nEnsure ModSecurity returns a proper REQBODY_ERROR status instead of a 500 Internal Server Error when encountering a premature EOF in JSON payloads.\n\n**Steps to tackle the problem:**\n\n1. **Review JSON Parsing Logic**: Investigate the section of the code responsible for JSON parsing within ModSecurity. Identify where the premature EOF is detected and analyze how the error handling is currently implemented.\n\n2. **Error Handling Update**: Modify the error handling logic to differentiate between JSON parsing errors and other server errors. Ensure that a proper REQBODY_ERROR status is returned for JSON-specific issues.\n\n3. **Testing**: Create a series of tests with various malformed JSON payloads, including the provided example, to verify that the system correctly handles these cases without triggering a 500 status.\n\n4. **Documentation**: Update any relevant documentation to reflect the changes made in error handling, providing guidance on how to handle JSON parsing errors.\n\n5. 
**Deploy Changes**: Once testing is complete and all cases are handled appropriately, prepare the changes for deployment and monitor the system to ensure no new issues arise.\n\nBy addressing this issue, improve the robustness of ModSecurity’s error handling for JSON inputs, enhancing user experience and system reliability.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2807", @@ -61980,6 +64137,7 @@ "node_id": "I_kwDOABQmks5T56oc", "title": "Transaction intializing twice per request for URI '/'", "body": "**Transaction intializing twice**\r\nFor the mod security rules that I have wrote, one single request initializes the transaction twice and runs associated lua scripts twice.\r\n\r\n**Logs and dumps**\r\n\r\nOutput of:\r\n 1. DebugLogs (level 9)\r\n ```\r\n[166567211462.029228] [] [4] Initializing transaction\r\n[166567211462.029228] [] [4] Transaction context created.\r\n[166567211462.029228] [] [4] Starting phase CONNECTION. (SecRules 0)\r\n[166567211462.029228] [] [9] This phase consists of 0 rule(s).\r\n[166567211462.029228] [] [4] Starting phase URI. (SecRules 0 + 1/2)\r\n[166567211462.029228] [/] [4] Starting phase REQUEST_HEADERS. (SecRules 1)\r\n[166567211462.029228] [/] [9] This phase consists of 8 rule(s).\r\n[166567211462.029228] [/] [4] (Rule: 200000) Executing operator \"Rx\" with param \"^(?:application(?:/soap\\+|/)|text/)xml\" against REQUEST_HEADERS:Content-Type.\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] (Rule: 200001) Executing operator \"Rx\" with param \"^application/json\" against REQUEST_HEADERS:Content-Type.\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] (Rule: 100001) Executing operator \"Eq\" with param \"0\" against USER:EOYVxTvk_twst.\r\n[166567211462.029228] [/] [9] Target value: \"0\" (Variable: USER:EOYVxTvk_twst)\r\n[166567211462.029228] [/] [9] Matched vars updated.\r\n[166567211462.029228] [/] [4] Running [independent] (non-disruptive) action: setvar\r\n[166567211462.029228] [/] [8] Saving variable: USER:EOYVxTvk_twst with value: 1665672114\r\n[166567211462.029228] [/] [4] Rule returned 1.\r\n[166567211462.029228] [/] [4] Running (disruptive) action: pass.\r\n[166567211462.029228] [/] [8] Running action pass\r\n[166567211462.029228] [/] [4] (Rule: 100002) Executing operator \"Rx\" with param \"\\Qfirefox\\E\" against REQUEST_HEADERS:User-Agent.\r\n[166567211462.029228] [/] [9] Target value: \"firefox\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[166567211462.029228] [/] [9] Matched vars updated.\r\n[166567211462.029228] [/] [4] Running [independent] (non-disruptive) action: setvar\r\n[166567211462.029228] [/] [8] Saving variable: USER:dynamic_var with value: user.EOYVxTvk\r\n[166567211462.029228] [/] [4] Rule returned 1.\r\n[166567211462.029228] [/] [9] Running action: log\r\n[166567211462.029228] [/] [9] Saving transaction to logs\r\n[166567211462.029228] [/] [9] Running action: auditlog\r\n[166567211462.029228] [/] [9] Saving transaction to logs\r\n[166567211462.029228] [/] [9] Running action: setuid\r\n[166567211462.029228] [/] [8] User collection initiated with value: ''.\r\n[166567211462.029228] [/] [9] Running action: exec\r\n[166567211462.029228] [/] [8] Running script... 
/usr/local/openresty/nginx/lua/epoch_time_update.lua\r\n[166567211462.029228] [/] [1]\r\n[166567211462.029228] [/] [1] twst_var_name user.EOYVxTvk_twst\r\n[166567211462.029228] [/] [1] req_rate_var user.EOYVxTvk\r\n[166567211462.029228] [/] [1] throttle_window_start 1665672114\r\n[166567211462.029228] [/] [1] throttle_window_end 1665672174\r\n[166567211462.029228] [/] [1] cant find req_rate_var user.EOYVxTvk\r\n[166567211462.029228] [/] [1] current_req_rate 0\r\n[166567211462.029228] [/] [1] setting current_req_rate to 1\r\n[166567211462.029228] [/] [9] Returning from lua script:\r\n[166567211462.029228] [/] [4] (Rule: 100004) Executing operator \"Gt\" with param \"30\" against USER:EOYVxTvk.\r\n[166567211462.029228] [/] [9] Target value: \"1\" (Variable: USER:::::EOYVxTvk)\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] (Rule: 100005) Executing operator \"Eq\" with param \"0\" against USER:DCrgmTrP_twst.\r\n[166567211462.029228] [/] [9] Target value: \"0\" (Variable: USER:DCrgmTrP_twst)\r\n[166567211462.029228] [/] [9] Matched vars updated.\r\n[166567211462.029228] [/] [4] Running [independent] (non-disruptive) action: setvar\r\n[166567211462.029228] [/] [8] Saving variable: USER:DCrgmTrP_twst with value: 1665672114\r\n[166567211462.029228] [/] [4] Rule returned 1.\r\n[166567211462.029228] [/] [4] Running (disruptive) action: pass.\r\n[166567211462.029228] [/] [8] Running action pass\r\n[166567211462.029228] [/] [4] (Rule: 100006) Executing operator \"Rx\" with param \"\\Qchrome browser\\E\" against REQUEST_HEADERS:User-Agent.\r\n[166567211462.029228] [/] [9] Target value: \"firefox\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] (Rule: 100008) Executing operator \"Gt\" with param \"45\" against USER:DCrgmTrP.\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] Starting phase REQUEST_BODY. 
(SecRules 2)\r\n[166567211462.029228] [/] [9] This phase consists of 5 rule(s).\r\n[166567211462.029228] [/] [4] (Rule: 200007) Executing operator \"Ge\" with param \"1000\" against ARGS.\r\n[166567211462.029228] [/] [9] Target value: \"0\" (Variable: ARGS)\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] (Rule: 200002) Executing operator \"Eq\" with param \"0\" against REQBODY_ERROR.\r\n[166567211462.029228] [/] [9] Target value: \"0\" (Variable: REQBODY_ERROR)\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] (Rule: 200003) Executing operator \"Eq\" with param \"0\" against MULTIPART_STRICT_ERROR.\r\n[166567211462.029228] [/] [9] Target value: \"\" (Variable: MULTIPART_STRICT_ERROR)\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] (Rule: 200004) Executing operator \"Eq\" with param \"1\" against MULTIPART_UNMATCHED_BOUNDARY.\r\n[166567211462.029228] [/] [9] Target value: \"\" (Variable: MULTIPART_UNMATCHED_BOUNDARY)\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211462.029228] [/] [4] (Rule: 200005) Executing operator \"StrEq\" with param \"0\" against TX:regex(^MSC_).\r\n[166567211462.029228] [/] [4] Rule returned 0.\r\n[166567211462.029228] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [] [4] Initializing transaction\r\n[166567211414.410830] [] [4] Transaction context created.\r\n[166567211414.410830] [] [4] Starting phase CONNECTION. (SecRules 0)\r\n[166567211414.410830] [] [9] This phase consists of 0 rule(s).\r\n[166567211414.410830] [] [4] Starting phase URI. (SecRules 0 + 1/2)\r\n[166567211414.410830] [/] [4] Starting phase REQUEST_HEADERS. 
(SecRules 1)\r\n[166567211414.410830] [/] [9] This phase consists of 8 rule(s).\r\n[166567211414.410830] [/] [4] (Rule: 200000) Executing operator \"Rx\" with param \"^(?:application(?:/soap\\+|/)|text/)xml\" against REQUEST_HEADERS:Content-Type.\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 200001) Executing operator \"Rx\" with param \"^application/json\" against REQUEST_HEADERS:Content-Type.\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 100001) Executing operator \"Eq\" with param \"0\" against USER:EOYVxTvk_twst.\r\n[166567211414.410830] [/] [9] Target value: \"1\" (Variable: USER:EOYVxTvk_twst)\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 100002) Executing operator \"Rx\" with param \"\\Qfirefox\\E\" against REQUEST_HEADERS:User-Agent.\r\n[166567211414.410830] [/] [9] Target value: \"firefox\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[166567211414.410830] [/] [9] Matched vars updated.\r\n[166567211414.410830] [/] [4] Running [independent] (non-disruptive) action: setvar\r\n[166567211414.410830] [/] [8] Saving variable: USER:dynamic_var with value: user.EOYVxTvk\r\n[166567211414.410830] [/] [4] Rule returned 1.\r\n[166567211414.410830] [/] [9] Running action: log\r\n[166567211414.410830] [/] [9] Saving transaction to logs\r\n[166567211414.410830] [/] [9] Running action: auditlog\r\n[166567211414.410830] [/] [9] Saving transaction to logs\r\n[166567211414.410830] [/] [9] Running action: setuid\r\n[166567211414.410830] [/] [8] User collection initiated with value: ''.\r\n[166567211414.410830] [/] [9] Running action: exec\r\n[166567211414.410830] [/] [8] Running script... 
/usr/local/openresty/nginx/lua/epoch_time_update.lua\r\n[166567211414.410830] [/] [1]\r\n[166567211414.410830] [/] [1] twst_var_name user.EOYVxTvk_twst\r\n[166567211414.410830] [/] [1] req_rate_var user.EOYVxTvk\r\n[166567211414.410830] [/] [1] throttle_window_start 1665672114\r\n[166567211414.410830] [/] [1] throttle_window_end 1665672174\r\n[166567211414.410830] [/] [1] current_req_rate 1\r\n[166567211414.410830] [/] [1] setting current_req_rate to 2\r\n[166567211414.410830] [/] [9] Returning from lua script:\r\n[166567211414.410830] [/] [4] (Rule: 100004) Executing operator \"Gt\" with param \"30\" against USER:EOYVxTvk.\r\n[166567211414.410830] [/] [9] Target value: \"2\" (Variable: USER:::::EOYVxTvk)\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 100005) Executing operator \"Eq\" with param \"0\" against USER:DCrgmTrP_twst.\r\n[166567211414.410830] [/] [9] Target value: \"1\" (Variable: USER:DCrgmTrP_twst)\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 100006) Executing operator \"Rx\" with param \"\\Qchrome browser\\E\" against REQUEST_HEADERS:User-Agent.\r\n[166567211414.410830] [/] [9] Target value: \"firefox\" (Variable: REQUEST_HEADERS:User-Agent)\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 100008) Executing operator \"Gt\" with param \"45\" against USER:DCrgmTrP.\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] Starting phase REQUEST_BODY. (SecRules 2)\r\n[166567211414.410830] [/] [9] This phase consists of 5 rule(s).\r\n[166567211414.410830] [/] [4] (Rule: 200007) Executing operator \"Ge\" with param \"1000\" against ARGS.\r\n[166567211414.410830] [/] [9] Target value: \"0\" (Variable: ARGS)\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 200002) Executing operator \"Eq\" with param \"0\" against REQBODY_ERROR.\r\n[166567211414.410830] [/] [9] Target value: \"0\" (Variable: REQBODY_ERROR)\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 200003) Executing operator \"Eq\" with param \"0\" against MULTIPART_STRICT_ERROR.\r\n[166567211414.410830] [/] [9] Target value: \"\" (Variable: MULTIPART_STRICT_ERROR)\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 200004) Executing operator \"Eq\" with param \"1\" against MULTIPART_UNMATCHED_BOUNDARY.\r\n[166567211414.410830] [/] [9] Target value: \"\" (Variable: MULTIPART_UNMATCHED_BOUNDARY)\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] (Rule: 200005) Executing operator \"StrEq\" with param \"0\" against TX:regex(^MSC_).\r\n[166567211414.410830] [/] [4] Rule returned 0.\r\n[166567211414.410830] [/] [9] Matched vars cleaned.\r\n[166567211414.410830] [/] [4] Starting phase RESPONSE_HEADERS. (SecRules 3)\r\n[166567211414.410830] [/] [9] This phase consists of 0 rule(s).\r\n[166567211414.410830] [/] [9] Appending response body: 1097 bytes. 
Limit set to: 524288.000000\r\n[166567211414.410830] [/] [4] Starting phase RESPONSE_BODY. (SecRules 4)\r\n[166567211414.410830] [/] [9] This phase consists of 0 rule(s).\r\n[166567211414.410830] [/] [4] Starting phase LOGGING. (SecRules 5)\r\n[166567211414.410830] [/] [9] This phase consists of 0 rule(s).\r\n[166567211414.410830] [/] [8] Checking if this request is suitable to be saved as an audit log.\r\n[166567211414.410830] [/] [8] Checking if this request is relevant to be part of the audit logs.\r\n[166567211414.410830] [/] [5] Saving this request as part of the audit logs.\r\n[166567211414.410830] [/] [8] Request was relevant to be saved. Parts: 6006\r\n```\r\n 2. AuditLogs\r\n ```\r\n ---SJm0ppeU---A--\r\n[13/Oct/2022:14:41:54 +0000] 166567211414.410830 10.0.96.4 64454 10.11.17.192 80\r\n---SJm0ppeU---B--\r\nGET / HTTP/1.1\r\nUser-Agent: firefox\r\nPostman-Token: 42c3c623-d140-40d5-9361-2cae719fb36a\r\nAccept: */*\r\nHost: 10.11.17.192\r\nAccept-Encoding: gzip, deflate, br\r\nConnection: keep-alive\r\n\r\n---SJm0ppeU---D--\r\n\r\n---SJm0ppeU---E--\r\n\\x0a\\x0a\\x0a\\x0a\\x0aWelcome to OpenResty!\\x0a\\x0a\\x0a\\x0a

Welcome to OpenResty!\x0aIf you see this page, the OpenResty web platform is successfully installed and\x0aworking. Further configuration is required.\x0a\x0aFor online documentation and support please refer to our\x0aopenresty.org site\x0aCommercial support is available at\x0aopenresty.com.\x0aWe have articles on troubleshooting issues like high CPU usage and\x0alarge memory usage on our official blog site.\x0aThank you for flying OpenResty.
\\x0a\\x0a\\x0a\r\n\r\n---SJm0ppeU---F--\r\nHTTP/1.1 200\r\nServer: openresty/1.21.4.1\r\nDate: Thu, 13 Oct 2022 14:41:54 GMT\r\nContent-Length: 1097\r\nContent-Type: text/html\r\nLast-Modified: Thu, 06 Oct 2022 09:29:18 GMT\r\nConnection: keep-alive\r\nETag: \"633e9fee-449\"\r\n\r\n---SJm0ppeU---H--\r\nModSecurity: Warning. Matched \"Operator `Rx' with parameter `\\Qfirefox\\E' against variable `REQUEST_HEADERS:User-Agent' (Value: `firefox' ) [file \"/usr/local/openresty/nginx/modsec/main.conf\"] [line \"8\"] [id \"100002\"] [rev \"\"] [msg \"\"] [data \"\"] [severity \"0\"] [ver \"\"] [maturity \"0\"] [accuracy \"0\"] [hostname \"10.11.17.192\"] [uri \"/\"] [unique_id \"166567211414.410830\"] [ref \"o0,7v27,7\"]\r\n\r\n---SJm0ppeU---I--\r\n\r\n---SJm0ppeU---J--\r\n\r\n---SJm0ppeU---Z--\r\n\r\n\r\n```\r\n\r\n_Notice:_ Be carefully to not leak any confidential information.\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\nAdd the following modsecurity configuration;\r\n```\r\nSecRule &user.EOYVxTvk_twst \"@eq 0\" \"phase:1,id:100001,pass,setvar:user.EOYVxTvk_twst=%{TIME_EPOCH}\"\r\n\r\nSecRule REQUEST_HEADERS:User-Agent \"\\Qfirefox\\E\" \"id:100002,phase:1,log,auditlog,setuid:%{tx.ua_hash},setvar:user.dynamic_var=user.EOYVxTvk,exec:/usr/local/openresty/nginx/lua/epoch_time_update.lua\"\r\n\r\nSecRule user:EOYVxTvk \"@gt 30\" \"chain,id:100004,phase:1,auditlog,deny,status:429,setenv:RATELIMITED,log,msg:'RATELIMITED User-Agent'\"\r\n SecRule REQUEST_HEADERS:User-Agent \"\\Qfirefox\\E\"\r\n\r\n\r\nSecRule &user.DCrgmTrP_twst \"@eq 0\" \"phase:1,id:100005,pass,setvar:user.DCrgmTrP_twst=%{TIME_EPOCH}\"\r\n\r\nSecRule REQUEST_HEADERS:User-Agent \"\\Qchrome browser\\E\" \"id:100006,phase:1,log,auditlog,setuid:%{tx.ua_hash},setvar:user.dynamic_var=user.DCrgmTrP,exec:/usr/local/openresty/nginx/lua/epoch_time_update.lua\"\r\n\r\nSecRule user:DCrgmTrP \"@gt 45\" \"chain,id:100008,phase:1,auditlog,deny,status:429,setenv:RATELIMITED,log,msg:'RATELIMITED User-Agent'\"\r\n SecRule REQUEST_HEADERS:User-Agent \"\\Qchrome browser\\E\"\r\n```\r\nCurl command to make the request.\r\n```\r\ncurl --location --request GET 'http://10.11.17.192/' --header 'User-Agent: firefox'\r\n```\r\n\r\n**Expected behavior**\r\n\r\nExpectation was **Rule: 100002** would run only once per request, but its running twice.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity v3.0.1 with nginx-connector v1.0.3\r\n - WebServer: nginx version: openresty/1.21.4.1\r\n - OS (and distro): Ubuntu 22.04.1 LTS\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? No\r\n - What is the version number? [e.g. 2018-08-11] NA\r\n\r\n", + "summary": "**Investigate Transaction Initialization Issue**\n\n**Problem:** \nIdentify why the transaction is initializing twice for the same request at the URI '/' when using custom ModSecurity rules, leading to associated Lua scripts executing multiple times.\n\n**Steps to Reproduce:**\n1. Add the provided ModSecurity configuration to your setup.\n2. 
Execute the curl command:\n ```\n curl --location --request GET 'http://10.11.17.192/' --header 'User-Agent: firefox'\n ```\n\n**Technical Details:**\n- The logs indicate two transactions are being initialized for the same request, as evidenced by the repeated \"Initializing transaction\" entries at different timestamps.\n- Rule **100002** is intended to execute once per request but appears to run twice according to the debug logs, where both executions can be seen.\n- Each execution involves setting and executing variables, and the Lua script `/usr/local/openresty/nginx/lua/epoch_time_update.lua` is being called twice, which may lead to incorrect state or behavior in handling user requests.\n\n**Initial Investigation Steps:**\n1. **Review Transaction Lifecycle:** Check if the rules are inadvertently triggering multiple transactions due to misconfiguration or rule chaining.\n2. **Analyze Rule Execution:** Examine if there are conditions that could cause the ModSecurity engine to re-evaluate the request or if the rules are conflicting in their execution phases.\n3. **Debug Logging:** Increase the debug log level to capture finer details about transaction handling and rule evaluations to better pinpoint where the duplication occurs.\n4. **Simplify Rules:** Temporarily simplify the rules to isolate the problematic rule or interaction. Start with just the first rule and gradually add others back to identify the cause of the double initialization.\n5. **Consult Documentation:** Review ModSecurity documentation for any known issues related to transaction initialization and Lua script execution that may provide insights into this behavior.\n\nBy following these steps, you can narrow down the source of the issue and apply appropriate fixes or adjustments to the ModSecurity configuration.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2816", @@ -62008,6 +64166,7 @@ "node_id": "I_kwDOABQmks5UccWf", "title": "ModSecurity can cause hang of apache httpd during graceful restart", "body": "**Describe the bug**\r\n\r\nWhen modsec is configured to pipe logs to different process (like rotatelogs for example) it can cause hang of apache httpd during graceful restart.\r\n\r\nWhen SecAuditLog directive is set as for example`SecAuditLog \"|/usr/sbin/rotatelogs -l -n 3 /var/log/httpd/modsec_audit.log 50M\"`\r\nmodsec opens pipe to rotatelogs during its configuration in `cmd_audit_log` function in apache2/apache2_config.c:1210\r\n\r\n```\r\n if (dcfg->auditlog_name[0] == '|') {\r\n const char *pipe_name = dcfg->auditlog_name + 1;\r\n piped_log *pipe_log;\r\n\r\n pipe_log = ap_open_piped_log(cmd->pool, pipe_name); <------------\r\n if (pipe_log == NULL) {\r\n return apr_psprintf(cmd->pool, \"ModSecurity: Failed to open the audit log pipe: %s\",\r\n pipe_name);\r\n }\r\n dcfg->auditlog_fd = ap_piped_log_write_fd(pipe_log);\r\n```\r\nThere is a problem that child processes created by httpd inherit the read side of pipe even when they are not using it and are meant to close them on startup which is ensured by read_handles global variable in server/log.c in apache httpd source.\r\nFor illustration here is code handling recording of the read handle:\r\n```\r\n/* remember to close this handle in the child process\r\n *\r\n * On Win32 this makes zero sense, because we don't\r\n * take the parent process's child procs.\r\n * If the win32 parent instead passed each and every\r\n * logger write handle from itself down to the child,\r\n * and the parent manages all 
aspects of keeping the\r\n * reliable pipe log children alive, this would still\r\n * make no sense :) Cripple it on Win32.\r\n */\r\nstatic void close_handle_in_child(apr_pool_t *p, apr_file_t *f)\r\n{\r\n#ifndef WIN32\r\n read_handle_t *new_handle;\r\n\r\n new_handle = apr_pcalloc(p, sizeof(read_handle_t));\r\n new_handle->next = read_handles;\r\n new_handle->handle = f;\r\n read_handles = new_handle;\r\n#endif\r\n}\r\n```\r\nThese handles get cleared when plog memory pool gets cleared which happens after the configuration is loaded and before open logs\r\nhooks are called. That means that child will get no reference to opened read side of the pipe and thus can not close it. This causes that process can wait for the write inside of the pipe indefinitely and will never get EPIPE from the OS informing the process about rotatelogs being already killed. This is why process waiting for write will hang.\r\n\r\n**Logs and dumps**\r\n\r\nOutput of:\r\n 1. DebugLogs (level 9)\r\n \r\n[modsec_debug.log.tar.gz](https://github.com/SpiderLabs/ModSecurity/files/9829252/modsec_debug.log.tar.gz)\r\n\r\n 2. AuditLogs\r\n\r\nEmpty because reproducing requires to stop rotatelogs process. \r\n\r\n 3. Error logs\r\n \r\n[error_log.txt](https://github.com/SpiderLabs/ModSecurity/files/9830681/error_log.txt)\r\n\r\n4. If there is a crash, the core dump file.\r\n\r\nBacktrace seems sufficient to me.\r\n#0 0x00007f560757259f in write () from target:/lib64/libc.so.6\r\n#1 0x00007f560767aa1f in apr_file_write () from target:/lib64/libapr-1.so.0\r\n#2 0x00007f560767ac4a in apr_file_write_full () from target:/lib64/libapr-1.so.0\r\n#3 0x00007f56067a8ea1 in sec_auditlog_write.isra () from target:/etc/httpd/modules/mod_security2.so\r\n#4 0x00007f5606784d21 in sec_audit_logger_native () from target:/etc/httpd/modules/mod_security2.so\r\n#5 0x00007f560677c10c in modsecurity_process_phase () from target:/etc/httpd/modules/mod_security2.so\r\n#6 0x00007f560676f9bd in hook_log_transaction () from target:/etc/httpd/modules/mod_security2.so\r\n#7 0x000055f66b95ba98 in ap_run_log_transaction ()\r\n#8 0x000055f66b96f89b in eor_bucket_cleanup ()\r\n#9 0x00007f560767dade in apr_pool_destroy () from target:/lib64/libapr-1.so.0\r\n#10 0x000055f66b964cc2 in eor_bucket_destroy ()\r\n#11 0x000055f66b96fa89 in ap_core_output_filter ()\r\n#12 0x000055f66b979dab in ap_process_request ()\r\n#13 0x000055f66b97a073 in ap_process_http_connection ()\r\n#14 0x000055f66b945e98 in ap_run_process_connection ()\r\n#15 0x00007f5606ff3292 in child_main () from target:/etc/httpd/modules/mod_mpm_prefork.so\r\n#16 0x00007f5606ff3607 in make_child () from target:/etc/httpd/modules/mod_mpm_prefork.so\r\n#17 0x00007f5606ff3669 in startup_children () from target:/etc/httpd/modules/mod_mpm_prefork.so\r\n#18 0x00007f5606ff3d93 in prefork_run () from target:/etc/httpd/modules/mod_mpm_prefork.so\r\n#19 0x000055f66b9467c8 in ap_run_mpm ()\r\n#20 0x000055f66b9346d5 in main ()\r\n\r\n**To Reproduce**\r\nThis problem is tricky to reproduce so forgive me for a complex reproducer, repeat these steps:\r\n\r\n1. Set SecAuditLog directive inside the modsec configuration to:\r\n`SecAuditLog \"|/usr/sbin/rotatelogs -l -n 3 /var/log/httpd/modsec_audit.log 50M\"`\r\n2. enable mpm prefork module like this:\r\n`LoadModule mpm_prefork_module modules/mod_mpm_prefork.so`\r\n3. 
Add mpm prefork module configuration\r\n```\r\n\r\nStartServers 1\r\nMinSpareServers 1\r\nMaxSpareServers 1\r\nServerLimit 1\r\nMaxRequestWorkers 1\r\nMaxRequestsPerChild 0\r\nMaxConnectionsPerChild 0\r\n\r\n```\r\nThese steps make it easier to reproduce the issue\r\n4. Create index page for httpd\r\n\r\n`$ touch /var/www/html/index.html`\r\n\r\n5. Start httpd (i am using systemd since its easier to keep track of processes)\r\n\r\n`# systemctl start httpd`\r\n\r\n6. Find rotatelogs process\r\n\r\n`$ systemctl status httpd`\r\n\r\n7. stop the rotatelogs process\r\n\r\n`# kill -STOP `\r\n\r\n8. Perform requests to fill pipes buffer with modsecurity logs (you must have active rules that log something for this request)\r\n\r\n`$ while true; do curl http://127.0.0.1 -m 5; [ $? -eq 28 ] && break; done`\r\n\r\n9. Try to perform graceful restart\r\n\r\n`# systemctl reload httpd`\r\n\r\n10. Resume rotatelogs process\r\n\r\n`# kill -CONT `\r\n\r\n11. Find httpd processes with systemctl and check whether one of them is not stuck\r\n\r\n`$ systemctl status httpd`\r\n\r\n`# for PID in ''; do pstack $PID >> ./pstack_logs; done;`\r\n\r\n`$ cat ./pstack_logs | grep 'in write ('`\r\n\r\n12. If grep find string `in write (` then one of the processes is still waiting for other process to read from previously created pipe.\r\n\r\n**Expected behavior**\r\n\r\nNo hanging process.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): 2.9.6\r\n - WebServer: 2.4.54\r\n - OS (and distro): Linux Fedora Rawhide\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set?\r\n Yes coreruleset mod_security_crs 3.3.4 from Fedora\r\n - What is the version number? [e.g. 2018-08-11]\r\n3.3.4-1\r\n\r\n**Additional context**\r\nI am creating a PR that fixes this issue.\r\nIt is sufficient that the code which opens the auditlog is moved to a open_logs hook instead of being executed in a configuration command.\r\n\r\nPlease do not hesitate to ask further questions.\r\n", + "summary": "### Summary of the GitHub Issue: ModSecurity Causes Apache HTTPD to Hang During Graceful Restart\n\n**Issue Description**: \nModSecurity hangs Apache HTTPD during a graceful restart when configured to pipe logs to a different process, such as `rotatelogs`. The issue occurs because child processes inherit the read side of a pipe that is not properly closed, causing them to wait indefinitely for a write operation.\n\n**Technical Details**:\n- When the `SecAuditLog` directive is set to pipe logs (e.g., `SecAuditLog \"|/usr/sbin/rotatelogs -l -n 3 /var/log/httpd/modsec_audit.log 50M\"`), ModSecurity opens a pipe in the `cmd_audit_log` function in `apache2/apache2_config.c`. \n- The child processes created by HTTPD inherit the read side of the pipe but do not close it properly. This is due to the management of read handles in `server/log.c`, which may not work as expected after the configuration is loaded.\n- The hanging occurs because the child processes wait for a write operation that never receives a response, as the `rotatelogs` process may be stopped.\n\n**Reproduction Steps**:\n1. Configure `SecAuditLog` with a pipe to `rotatelogs`.\n2. Enable the prefork MPM module.\n3. Set MPM configuration for a single process.\n4. Start HTTPD and the rotatelogs process.\n5. Stop the rotatelogs process.\n6. Generate requests that log data through ModSecurity.\n7. Attempt a graceful restart of HTTPD.\n8. 
Resume the rotatelogs process and check for hanging processes.\n\n**Expected Behavior**: \nNo processes should hang during a graceful restart.\n\n**Potential First Steps to Tackle the Problem**:\n1. Move the code that opens the audit log pipe from the configuration command to an open_logs hook to ensure proper handling of the pipe lifecycle.\n2. Review the management of read handles in the Apache HTTPD codebase to ensure that they are properly closed in all child processes.\n3. Conduct thorough testing by repeating the reproduction steps after making code changes to confirm that the hang issue is resolved.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2822", @@ -62023,8 +64182,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 403 + 403, + 398 ] } }, @@ -62037,6 +64196,7 @@ "node_id": "I_kwDOABQmks5Zz6uY", "title": "Memory leaks with nginx reload (without restart)", "body": "When, instead of restarting nginx, one performs a ```reload``` of the configuration, memory may leak.\r\n\r\nThe memory leaks are not large per reload, but if doing so frequently, the free memory reduction will become noticeable. A near-term mitigation is to at least periodically do a true restart.\r\n\r\nThis issue is being created in lieu of #2502 and others.\r\n", + "summary": "Investigate memory leaks occurring during nginx configuration reloads. Note that these leaks are not significant per reload, but frequent reloads lead to noticeable free memory reduction.\n\n1. Review existing issues, particularly #2502, for insights and potential solutions.\n2. Analyze the nginx source code to identify areas where memory is allocated but not freed during reloads.\n3. Monitor memory usage patterns during reloads to quantify leaks.\n4. Implement logging to track memory allocation and deallocation during the reload process.\n5. Explore the possibility of introducing a workaround, such as scheduling regular complete restarts of nginx to reclaim memory.\n6. Test any changes in a staging environment before deploying to production to ensure stability and performance.\n\nAddress these steps to mitigate the memory leak issue effectively.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2848", @@ -62052,8 +64212,8 @@ "repository": 1213, "assignees": [], "labels": [ - 399, - 404 + 404, + 399 ] } }, @@ -62066,6 +64226,7 @@ "node_id": "I_kwDOABQmks5Z4IXW", "title": "Apache httpd does not execute ErrorDocument directive if response code is changed during phase 4", "body": "**Describe the bug**\r\nWhen response code is changed during phase 4 and the original response code is other than 200, Apache httpd does not execute ErrorDocument directive and bad response is generated.\r\n\r\n**Logs and dumps**\r\n\r\nOutput of:\r\n 1. DebugLogs (level 9)\r\n \r\n[modsec_debug.log](https://github.com/SpiderLabs/ModSecurity/files/10278739/modsec_debug.log)\r\n\r\n\r\n 2. AuditLogs\r\n\r\n[modsec_audit.log](https://github.com/SpiderLabs/ModSecurity/files/10278741/modsec_audit.log)\r\n\r\n\r\n 3. Error logs\r\n \r\n[error_log.txt](https://github.com/SpiderLabs/ModSecurity/files/10278744/error_log.txt)\r\n\r\n\r\n**To Reproduce**\r\n\r\n1. Add following configuration:\r\n```\r\n ProxyPass /helloworld http://localhost:9090/unavaible\r\n ErrorDocument 403 /custompage.html\r\n SecRule RESPONSE_PROTOCOL \"@contains HTTP\" \"id:'4',phase:4,auditlog,log,deny,status:403,msg:'yay!'\"\r\n```\r\n2. 
Create and place custompage.html page to server root.\r\n3. execute `$ curl -v localhost/helloworld`\r\n\r\n**Actual behavior**\r\n```\r\n\r\n\r\n403 Forbidden\r\n\r\n

Forbidden\r\nYou don't have permission to access this resource.\r\nAdditionally, a 503 Service Unavailable\r\nerror was encountered while trying to use an ErrorDocument to handle the request.
\r\n\r\n```\r\n\r\n**Expected behavior**\r\nProper rewritten response with custom error document:\r\n\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): mod_security-2.9.6\r\n - WebServer: httpd-2.4.37\r\n - OS (and distro): Linux Fedora\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? No\r\n\r\n**Additional context**\r\n\r\nIn my opinion, this is caused by apache httpds assumption that changed status of request which got through filter is a recursive error which occured during handling of previous errors.\r\nSee `ap_die_r` function in http module of apache, particularly this part of code:\r\n```\r\n /*\r\n * The following takes care of Apache redirects to custom response URLs\r\n * Note that if we are already dealing with the response to some other\r\n * error condition, we just report on the original error, and give up on\r\n * any attempt to handle the other thing \"intelligently\"...\r\n */\r\n if (recursive_error != HTTP_OK) {\r\n while (r_1st_err->prev && (r_1st_err->prev->status != HTTP_OK))\r\n r_1st_err = r_1st_err->prev; /* Get back to original error */\r\n\r\n if (r_1st_err != r) {\r\n /* The recursive error was caused by an ErrorDocument specifying\r\n * an internal redirect to a bad URI. ap_internal_redirect has\r\n * changed the filter chains to point to the ErrorDocument's\r\n * request_rec. Back out those changes so we can safely use the\r\n * original failing request_rec to send the canned error message.\r\n *\r\n * ap_send_error_response gets rid of existing resource filters\r\n * on the output side, so we can skip those.\r\n */\r\n update_r_in_filters(r_1st_err->proto_output_filters, r, r_1st_err);\r\n update_r_in_filters(r_1st_err->input_filters, r, r_1st_err);\r\n }\r\n\r\n custom_response = NULL; /* Do NOT retry the custom thing! */\r\n }\r\n else {\r\n int error_index = ap_index_of_response(type);\r\n custom_response = ap_response_code_string(r, error_index);\r\n recursive_error = 0;\r\n }\r\n```\r\nClearing the original response code with `f->r->status = 200;` in `send_error_bucket` function appears to fix this issue.\r\n\r\nThis issue is a continuation of #533 and extends findings of Marc Stern in https://github.com/SpiderLabs/ModSecurity/issues/533#issuecomment-1270214868\r\n", + "summary": "**Summarize the Issue:**\nFix the Apache httpd behavior where the `ErrorDocument` directive does not execute if the response code is changed during phase 4, particularly when the original response code is not 200. This leads to a poor user experience where a generic 403 error is displayed instead of the custom error page.\n\n**Technical Details:**\n- The bug occurs when using ModSecurity rules that alter the response status during phase 4.\n- The `ap_die_r` function in the Apache HTTP module is incorrectly handling the recursive error state, preventing the proper rendering of the custom error document.\n- The current behavior results in a 503 Service Unavailable error when attempting to access a custom error page defined in the `ErrorDocument` directive.\n\n**Steps to Reproduce:**\n1. Add the following configuration to your Apache setup:\n ```apache\n ProxyPass /helloworld http://localhost:9090/unavaible\n ErrorDocument 403 /custompage.html\n SecRule RESPONSE_PROTOCOL \"@contains HTTP\" \"id:'4',phase:4,auditlog,log,deny,status:403,msg:'yay!'\"\n ```\n2. Place a custom error page named `custompage.html` in the server root directory.\n3. 
Execute the command: \n ```bash\n curl -v localhost/helloworld\n ```\n\n**Expected Outcome:**\nThe server should properly return the custom error page instead of the generic 403 Forbidden error.\n\n**Proposed First Steps to Tackle the Problem:**\n1. Review the `ap_die_r` function in the Apache source code to understand how it manages error states.\n2. Modify the handling of the `recursive_error` condition to allow execution of the `ErrorDocument` directive when the response status is changed.\n3. Implement a temporary fix by setting `f->r->status = 200;` in the `send_error_bucket` function to bypass the current limitation.\n4. Test the changes with various response codes and ensure that custom error documents are rendered correctly.\n5. Document findings and prepare to submit a patch for review.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2849", @@ -62081,9 +64242,9 @@ "repository": 1213, "assignees": [], "labels": [ - 398, 400, - 403 + 403, + 398 ] } }, @@ -62096,6 +64257,7 @@ "node_id": "I_kwDOABQmks5bQX6h", "title": "DOC: `SecRuleUpdateActionById` and `chain` docs are errant for both 2.x and 3.x ", "body": "The official doc for modsecurity 2.x and 3.x for both `SecRuleUpdateActionById` and `chain` are errant. They lead to errant rule writing and/or exposing underlying modsecurity bugs. I request clarification/rewrite of at least these two sections of both 2.x and 3.x docs.\r\n\r\nThis is a cascade issue from https://github.com/coreruleset/coreruleset/issues/3080#issuecomment-1374575642. I recommend reviewing that whole issue to understand the errant docs and gaps in information.\r\n\r\n### Doc Problems\r\n\r\n1. `SecRuleUpdateActionById` doesn't organize limitations/workarounds together; e.g. modsec 2.x `SecRuleUpdateActionById` on rule chains has a `chain` workaround (see issue above) and limitation (see issue above) and may have additional behavior/workaround changes in 3.x (I can't test 3.x)\r\n2. `chain` has wrong language, e.g. \"Non-disruptive rules\" should be \"Non-disruptive actions\"\r\n3. `chain` has wrong facts, e.g. \"they [non-disruptive actions] will be executed if the rule that contains them matches and not only when the entire chain matches\". That statement is false since it has been proven in 2.x (can't test myself in 3.x) that `setvar` does NOT \"not only when the entire chain matches\".\r\n4. Coordinate `chain` with `SecRuleUpdateActionById` docs so they match each other's limitations/workarounds.\r\n\r\nDocs problems of these areas exist in both 2.x and 3.x docs; sometimes similar, sometimes identical.\r\nI recommend adding some rule + rule update examples to demonstrate how to use them correctly on each 2.x and 3.x. The issue above has premade examples.\r\n\r\nBelow is one suggestion for the 3.x docs.\r\n\r\n### Example 3.x docs for `SecRuleUpdateActionById`\r\n\r\n```\r\n...It has two limitations: it cannot be used to change the ID or phase of a rule. Only the actions that \r\ncan appear only once are overwritten. The actions that are allowed to appear multiple times in a list, \r\nwill be appended to the end of the list.\r\n\r\nExample foo bar...\r\n```\r\n\r\nthen 3 paragraphs and 2 examples under that is another limitation\r\n```\r\nNote : If the target rule is a chained rule, action updates may only be made to the main (first) rule in the chain.\r\n```\r\n\r\nThese need to be combined together. Perhaps something like\r\n\r\n```\r\n...It has limitations\r\n\r\n1. 
it cannot be used to change the ID or phase of a rule.\r\n2. Only the actions that can appear only once are overwritten. The actions that are allowed to appear \r\n multiple times in a list, will be appended to the end of the list.\r\n3. If the target rule is a chained rule, action updates may only be made to the main (first) rule in the chain. \r\n This is a tighter restriction than modsecurity 2.x.\r\n\r\nExample foo bar...\r\n```", + "summary": "Update documentation for `SecRuleUpdateActionById` and `chain` in both modsecurity 2.x and 3.x. Address the errant information that leads to incorrect rule writing and expose potential underlying bugs. \n\n### Actions:\n\n1. **Clarify Limitations**: Organize limitations and workarounds for `SecRuleUpdateActionById`. Ensure that the behavior of `chain` in modsecurity 2.x is clearly documented, along with any differences in 3.x.\n\n2. **Correct Terminology**: Change \"Non-disruptive rules\" to \"Non-disruptive actions\" in the `chain` documentation.\n\n3. **Fix Misstatements**: Revise the statement regarding the execution of non-disruptive actions to accurately reflect that `setvar` does not execute based on the entire chain match in 2.x. Verify behavior in 3.x.\n\n4. **Align Documentation**: Coordinate the `chain` and `SecRuleUpdateActionById` documentation to ensure they are consistent in outlining their limitations and workarounds.\n\n5. **Add Examples**: Include examples for both 2.x and 3.x that demonstrate correct usage of `SecRuleUpdateActionById` and `chain`. Leverage existing examples from related issues for clarity.\n\n### First Steps:\n\n- Review the linked GitHub issue and related comments to understand the specific gaps in documentation.\n- Identify all instances of `SecRuleUpdateActionById` and `chain` in the current documentation.\n- Draft revised text for problematic sections, incorporating clearer language and examples.\n- Propose changes via a pull request to the appropriate documentation repository.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2860", @@ -62111,9 +64273,9 @@ "repository": 1213, "assignees": [], "labels": [ + 416, 398, - 399, - 416 + 399 ] } }, @@ -62126,6 +64288,7 @@ "node_id": "I_kwDOABQmks5cbLl8", "title": "ModSecurity not finding SecMarker in phase 2", "body": "Not sure if this is a ModSecurity bug or a CoreRuleSet bug, but starting here. \r\n\r\nOS: Debian 11\r\nVersions: libapache2-mod-security2 2.9.3-3+deb11u1 with modsecurity-crs 3.3.0-1+deb11u1, both installed from Debian stable repositories.\r\n\r\nWith SecDebugLogLevel 9, following relevant lines appear in the debug log when making a PUT request to /tomc4 (which should be blocked by default).\r\n```\r\n[/tomc4][4] Recipe: Invoking rule 7f3a5e3c8778; [file \"/usr/share/modsecurity-crs/rules/REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf\"] [line \"72\"] [id \"9001000\"].\r\n[/tomc4][5] Rule 7f3a5e3c8778: SecRule \"&TX:crs_exclusions_drupal|TX:crs_exclusions_drupal\" \"@eq 0\" \"phase:1,auditlog,id:9001000,t:none,nolog,ver:OWASP_CRS/3.3.0,skipAfter:END-DRUPAL-RULE-EXCLUSIONS\"\r\n[/tomc4][4] Transformation completed in 0 usec.\r\n[/tomc4][4] Executing operator \"eq\" with param \"0\" against &TX:crs_exclusions_drupal.\r\n[/tomc4][9] Target value: \"0\"\r\n[/tomc4][4] Operator completed in 0 usec.\r\n[/tomc4][4] Warning. Operator EQ matched 0 at TX. 
[file \"/usr/share/modsecurity-crs/rules/REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf\"] [line \"72\"] [id \"9001000\"] [ver \"OWASP_CRS/3.3.0\"]\r\n[/tomc4][4] Rule returned 1.\r\n[/tomc4][9] Skipping after rule 7f3a5e3c8778 id=\"END-DRUPAL-RULE-EXCLUSIONS\" -> mode SKIP_RULES.\r\n[/tomc4][9] Found rule 7f3a5e387188 id=\"END-DRUPAL-RULE-EXCLUSIONS\".\r\n[/tomc4][4] Continuing execution after rule id=\"END-DRUPAL-RULE-EXCLUSIONS\".\r\n[/tomc4][4] Recipe: Invoking rule 7f3a5e387d68; [file \"/usr/share/modsecurity-crs/rules/REQUEST-903.9002-WORDPRESS-EXCLUSION-RULES.conf\"] [line \"26\"] [id \"9002000\"].\r\n[/tomc4][4] Recipe: Invoking rule 7f3a5e387d68; [file \"/usr/share/modsecurity-crs/rules/REQUEST-903.9002-WORDPRESS-EXCLUSION-RULES.conf\"] [line \"26\"] [id \"9002000\"].\r\n[/tomc4][5] Rule 7f3a5e387d68: SecRule \"&TX:crs_exclusions_wordpress|TX:crs_exclusions_wordpress\" \"@eq 0\" \"phase:1,auditlog,id:9002000,t:none,nolog,ver:OWASP_CRS/3.3.0,skipAfter:END-WORDPRESS\"\r\n[/tomc4][4] Transformation completed in 1 usec.\r\n[/tomc4][4] Executing operator \"eq\" with param \"0\" against &TX:crs_exclusions_wordpress.\r\n```\r\nThis is during phase:1, which is expected. Rule 9001000 triggers a skipAfter to END-DRUPAL-RULE-EXCLUSIONS, which is defined at the end of the same file, REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf. \r\n\r\nWhen it processes phase 2 however, a similar rules 9001001 is executed, also triggering the skipAfter to the same END-DRUPAL-RULE-EXCLUSIONS SecMarker. However, this time it does not seem to find the marker, skipping each and every other phase 2 rule.\r\n```\r\n[/tomc4][4] Starting phase REQUEST_BODY.\r\n[/tomc4][9] This phase consists of 470 rule(s).\r\n...\r\n[/tomc4][4] Recipe: Invoking rule 7f3a5e3c0190; [file \"/usr/share/modsecurity-crs/rules/REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf\"] [line \"81\"] [id \"9001001\"].\r\n[/tomc4][5] Rule 7f3a5e3c0190: SecRule \"&TX:crs_exclusions_drupal|TX:crs_exclusions_drupal\" \"@eq 0\" \"phase:2,auditlog,id:9001001,t:none,nolog,ver:OWASP_CRS/3.3.0,skipAfter:END-DRUPAL-RULE-EXCLUSIONS\"\r\n[/tomc4][4] Transformation completed in 0 usec.\r\n[/tomc4][4] Executing operator \"eq\" with param \"0\" against &TX:crs_exclusions_drupal.\r\n[/tomc4][9] Target value: \"0\"\r\n[/tomc4][4] Operator completed in 0 usec.\r\n[/tomc4][4] Warning. Operator EQ matched 0 at TX. 
[file \"/usr/share/modsecurity-crs/rules/REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf\"] [line \"81\"] [id \"9001001\"] [ver \"OWASP_CRS/3.3.0\"]\r\n[/tomc4][4] Rule returned 1.\r\n[/tomc4][9] Skipping after rule 7f3a5e3c0190 id=\"END-DRUPAL-RULE-EXCLUSIONS\" -> mode SKIP_RULES.\r\n[/tomc4][9] Current rule is id=\"9001100\" [chained 0] is trying to find the SecMarker=\"END-DRUPAL-RULE-EXCLUSIONS\" [stater 0]\r\n[/tomc4][9] Current rule is id=\"9001110\" [chained 0] is trying to find the SecMarker=\"END-DRUPAL-RULE-EXCLUSIONS\" [stater 0]\r\n...\r\n[/tomc4][9] Current rule is id=\"9001216\" [chained 0] is trying to find the SecMarker=\"END-DRUPAL-RULE-EXCLUSIONS\" [stater 0]\r\n[/tomc4][9] Current rule is id=\"9002001\" [chained 0] is trying to find the SecMarker=\"END-DRUPAL-RULE-EXCLUSIONS\" [stater 0]\r\n....\r\n[/tomc4][9] Current rule is id=\"980018\" [chained 0] is trying to find the SecMarker=\"END-DRUPAL-RULE-EXCLUSIONS\" [stater 0]\r\n[/tomc4][4] Hook insert_filter: Adding output filter (r 7f3a5ee6a0a0).\r\n[/tomc4][9] Output filter: Receiving output (f 7f3a5ee3b6d8, r 7f3a5ee6a0a0).\r\n[/tomc4][4] Starting phase RESPONSE_HEADERS.\r\n```\r\nAfter skipping rule 9001216 (defined in REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf), it should hit the END-DRUPAL-RULE-EXCLUSIONS SecMarker, but instead continues to skip rule 9002001 which is defined in REQUEST-903.9002-WORDPRESS-EXCLUSION-RULES.conf.\r\n\r\n\r\n ", + "summary": "Investigate the issue where ModSecurity is unable to find the SecMarker in phase 2, leading to the skipping of all subsequent rules. \n\n1. Confirm the environment setup:\n - OS: Debian 11\n - ModSecurity Version: libapache2-mod-security2 2.9.3-3+deb11u1\n - Core Rule Set Version: modsecurity-crs 3.3.0-1+deb11u1\n\n2. Analyze the debug log for PUT requests to `/tomc4`. Ensure that the relevant rules (9001000 and 9001001) are correctly defined in the respective rule files:\n - `REQUEST-903.9001-DRUPAL-EXCLUSION-RULES.conf`\n - Check that the `END-DRUPAL-RULE-EXCLUSIONS` SecMarker is properly defined at the end of this file.\n\n3. Verify the flow of rule execution:\n - Ensure that phase 1 correctly triggers the skipAfter to `END-DRUPAL-RULE-EXCLUSIONS`.\n - When transitioning to phase 2, confirm if the SecMarker is being recognized. \n\n4. Debug further:\n - Increase the `SecDebugLogLevel` if necessary to capture more detailed logs.\n - Check if there are any conflicting rules or misconfigurations that could cause the marker to be unrecognized.\n\n5. Test with simplified rules:\n - Create a minimal set of rules to isolate the problem. Start with a single exclusion rule and gradually add complexity.\n \n6. Review the configuration files:\n - Ensure that all rules are loaded correctly and in the right order.\n - Validate the syntax and any dependencies between rules.\n\n7. 
Report findings:\n - If the issue persists, document findings and consider opening an issue with detailed logs and configuration for further assistance from the community or maintainers.", "state": "open", "state_reason": "reopened", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2865", @@ -62141,8 +64304,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 403 + 403, + 398 ] } }, @@ -62155,6 +64318,7 @@ "node_id": "I_kwDOABQmks5j74wp", "title": "@ipMatch misses IP with multiple values", "body": "v2 master in httpd.\r\nThe following rule works correctly (matches 192.168.59.1 with 192.168.0.0/16):\r\n`SecRule \"REMOTE_ADDR\" \"@ipMatch 192.168.0.0/16\" ...`\r\nHowever, when the IP is mixed with several other ones, it doesn't perform the matching correctly (it should match 192.168.59.1 with 192.168.0.0/16).\r\n\r\nExample from the debug log (with a huge number of IP, I agree):\r\n- Rule 22a21f63bd0: SecRule \"REMOTE_ADDR\" \"@ipMatch 212.222.125.64/26,10.128.2.0/24,10.128.5.0/24,10.128.26.0/24,10.135.40.0/23,10.135.45.0/24,127.0.0.1,127.0.0.1,127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,81.243.240.179,104.40.205.115,212.222.125.64/26,10.128.2.0/24,10.128.5.0/24,10.128.26.0/24,10.135.40.0/23,10.135.45.0/24,10.201.11.231/28,10.201.21.231/28,185.248.206.0/22,176.124.43.192/26,10.201.10.203/29,10.201.10.41,151.216.16.0/20,185.161.121.0/24,193.101.184.0/24,149.36.6.0/24,195.190.82.0/24,194.51.35.0/24,185.194.166.0/24,185.194.167.0/24,47.91.28.27,47.245.0.176,47.245.4.189,8.209.115.195,212.222.125.64/26,10.128.2.0/24,10.128.5.0/24,10.128.26.0/24,10.135.40.0/23,10.135.45.0/24,176.124.43.192/26,10.201.10.203/29,10.201.10.41,127.0.0.1,127.0.0.1,10.201.11.231/28,10.201.21.231/28,127.0.0.1,127.0.0.1,81.243.240.179,104.40.205.115,212.222.125.64/26,10.128.2.0/24,10.128.5.0/24,10.128.26.0/24,10.135.40.0/23,10.135.45.0/24,10.201.11.231/28,10.201.21.231/28,185.248.206.0/22,176.124.43.192/26,10.201.10.203/29,10.201.10.41,151.216.16.0/20,185.161.121.0/24,193.101.184.0/24,149.36.6.0/24,195.190.82.0/24,194.51.35.0/24,185.194.166.0/24,185.194.167.0/24,47.91.28.27,47.245.0.176,47.245.4.189,8.209.115.195,127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.1,127.0.0.1,10.190.10.0/24,81.243.240.179,104.40.205.115\" \"phase:1,t:none,nolog,noauditlog,pass\"\r\n- Transformation completed in 0 usec.\r\n- Executing operator \"ipMatch\" with param 
\"212.222.125.64/26,10.128.2.0/24,10.128.5.0/24,10.128.26.0/24,10.135.40.0/23,10.135.45.0/24,127.0.0.1,127.0.0.1,127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,81.243.240.179,104.40.205.115,212.222.125.64/26,10.128.2.0/24,10.128.5.0/24,10.128.26.0/24,10.135.40.0/23,10.135.45.0/24,10.201.11.231/28,10.201.21.231/28,185.248.206.0/22,176.124.43.192/26,10.201.10.203/29,10.201.10.41,151.216.16.0/20,185.161.121.0/24,193.101.184.0/24,149.36.6.0/24,195.190.82.0/24,194.51.35.0/24,185.194.166.0/24,185.194.167.0/24,47.91.28.27,47.245.0.176,47.245.4.189,8.209.115.195,212.222.125.64/26,10.128.2.0/24,10.128.5.0/24,10.128.26.0/24,10.135.40.0/23,10.135.45.0/24,176.124.43.192/26,10.201.10.203/29,10.201.10.41,127.0.0.1,127.0.0.1,10.201.11.231/28,10.201.21.231/28,127.0.0.1,127.0.0.1,81.243.240.179,104.40.205.115,212.222.125.64/26,10.128.2.0/24,10.128.5.0/24,10.128.26.0/24,10.135.40.0/23,10.135.45.0/24,10.201.11.231/28,10.201.21.231/28,185.248.206.0/22,176.124.43.192/26,10.201.10.203/29,10.201.10.41,151.216.16.0/20,185.161.121.0/24,193.101.184.0/24,149.36.6.0/24,195.190.82.0/24,194.51.35.0/24,185.194.166.0/24,185.194.167.0/24,47.91.28.27,47.245.0.176,47.245.4.189,8.209.115.195,127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.1,127.0.0.1,10.190.10.0/24,81.243.240.179,104.40.205.115\" against REMOTE_ADDR.\r\n- Target value: \"192.168.59.1\"\r\n- Operator completed in 0 usec.\r\n- Rule returned 0.", + "summary": "Fix the @ipMatch failure to match IP addresses against multiple CIDR values in the httpd v2 master. The rule correctly matches single IPs but fails when the REMOTE_ADDR is combined with a long list of IPs.\n\n### Steps to Address the Issue:\n\n1. **Review the @ipMatch Implementation**: Examine the logic that handles multiple IPs within the @ipMatch operator. Ensure it can process a list of CIDR values effectively.\n\n2. **Optimize Input Handling**: \n - Implement parsing logic that splits the incoming CIDR string into an array for individual processing.\n - Consider checking each CIDR against the REMOTE_ADDR rather than trying to match all at once.\n\n3. **Debug Logging Enhancements**: \n - Add more detailed debug logs to capture the matching process more clearly, especially when multiple IPs are involved. This will help identify where the matching logic fails.\n\n4. **Test Cases**: \n - Create unit tests that simulate various scenarios, including cases with large lists of CIDRs and edge cases (e.g., overlapping CIDRs, invalid formats).\n - Use the IP \"192.168.59.1\" against multiple CIDRs to verify that the expected matches and non-matches occur.\n\n5. **Performance Considerations**: \n - Assess the performance impact of matching against a large number of IPs. If necessary, implement optimizations or limits on the number of CIDRs processed at one time to avoid performance degradation.\n\n6. 
**Documentation Update**: \n - Update the documentation to reflect changes in handling multiple CIDRs and provide examples of usage.\n\nBy following these steps, enhance the reliability and efficiency of the @ipMatch functionality in the httpd module.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2893", @@ -62170,9 +64334,9 @@ "repository": 1213, "assignees": [], "labels": [ - 398, 400, - 403 + 403, + 398 ] } }, @@ -62185,6 +64349,7 @@ "node_id": "I_kwDOABQmks5k9797", "title": "Unique transaction ID is not unique", "body": "The Unique transaction ID as described in [ModSecurity-2-Data-Formats](https://github.com/SpiderLabs/ModSecurity/wiki/ModSecurity-2-Data-Formats) is not unique.\r\n\r\nMy goal is to send the **SecAuditLog** data chunks (audit log parts A-F and Z) to a database for further analysis. This works so far but I run into a problem here. I can not reassemble every sigle request, because Unique transaction IDs are not unique.\r\n\r\nTo demonstrate this, I have set 'SecAuditLogType Concurrent' and 'SecAuditLogStorageDir /opt/modsecurity/var/audit/'.\r\nNow I run a bunch of test URLs, which will trigger the SecAuditEngine. As a Result I get this:\r\n\r\n~~~bash\r\nwfs-proxy:/opt/modsecurity/var/audit/wwwrun # grep -re '^--' ./\r\n./20230503/20230503-1333/20230503-133313-ZFJGec8R3U_ldaMTbxzC3wAAAAM:--e3bdce7c-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGec8R3U_ldaMTbxzC3wAAAAM:--e3bdce7c-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGec8R3U_ldaMTbxzC3wAAAAM:--e3bdce7c-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGec8R3U_ldaMTbxzC3wAAAAM:--e3bdce7c-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGec8R3U_ldaMTbxzC3wAAAAM:--e3bdce7c-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeQ-tIwVAaX7zG6cCrAAAABc:--7253ec42-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeQ-tIwVAaX7zG6cCrAAAABc:--7253ec42-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeQ-tIwVAaX7zG6cCrAAAABc:--7253ec42-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeQ-tIwVAaX7zG6cCrAAAABc:--7253ec42-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeQ-tIwVAaX7zG6cCrAAAABc:--7253ec42-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWUtrU7v5kvWR0JxxwAAABE:--7253ec42-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWUtrU7v5kvWR0JxxwAAABE:--7253ec42-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWUtrU7v5kvWR0JxxwAAABE:--7253ec42-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWUtrU7v5kvWR0JxxwAAABE:--7253ec42-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWUtrU7v5kvWR0JxxwAAABE:--7253ec42-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeTmD8lkS7lgL73cABAAAAAE:--e3bdce7c-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeTmD8lkS7lgL73cABAAAAAE:--e3bdce7c-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeTmD8lkS7lgL73cABAAAAAE:--e3bdce7c-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeTmD8lkS7lgL73cABAAAAAE:--e3bdce7c-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeTmD8lkS7lgL73cABAAAAAE:--e3bdce7c-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeVOLgwIBaW4q2ekUFAAAABU:--7253ec42-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeVOLgwIBaW4q2ekUFAAAABU:--7253ec42-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeVOLgwIBaW4q2ekUFAAAABU:--7253ec42-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeVOLgwIBaW4q2ekUFAAAABU:--7253ec42-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeVOLgwIBaW4q2ekUFAAAABU:--7253ec42-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSWi0RqhZpm4t4h8LgA
AABg:--23b0d761-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSWi0RqhZpm4t4h8LgAAABg:--23b0d761-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSWi0RqhZpm4t4h8LgAAABg:--23b0d761-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSWi0RqhZpm4t4h8LgAAABg:--23b0d761-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSWi0RqhZpm4t4h8LgAAABg:--23b0d761-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefyFgPo-zywsXm9qygAAAAw:--c96b7719-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefyFgPo-zywsXm9qygAAAAw:--c96b7719-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefyFgPo-zywsXm9qygAAAAw:--c96b7719-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefyFgPo-zywsXm9qygAAAAw:--c96b7719-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefyFgPo-zywsXm9qygAAAAw:--c96b7719-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWkv4RhdUr82XzP3MQAAAAc:--e3bdce7c-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWkv4RhdUr82XzP3MQAAAAc:--e3bdce7c-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWkv4RhdUr82XzP3MQAAAAc:--e3bdce7c-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWkv4RhdUr82XzP3MQAAAAc:--e3bdce7c-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeWkv4RhdUr82XzP3MQAAAAc:--e3bdce7c-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSAkZTeubWGVgOyg-wAAAA8:--a09a2874-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSAkZTeubWGVgOyg-wAAAA8:--a09a2874-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSAkZTeubWGVgOyg-wAAAA8:--a09a2874-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSAkZTeubWGVgOyg-wAAAA8:--a09a2874-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGeSAkZTeubWGVgOyg-wAAAA8:--a09a2874-Z--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefHBvyBvLWBOcUuWaAAAAAs:--112c5e06-A--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefHBvyBvLWBOcUuWaAAAAAs:--112c5e06-B--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefHBvyBvLWBOcUuWaAAAAAs:--112c5e06-F--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefHBvyBvLWBOcUuWaAAAAAs:--112c5e06-E--\r\n./20230503/20230503-1333/20230503-133313-ZFJGefHBvyBvLWBOcUuWaAAAAAs:--112c5e06-Z--\r\n~~~\r\n\r\nAs you can see, 'e3bdce7c' and '7253ec42' are the UIDs for three different requests each. May be I did something wrong or misunderstood something, but that is, what I see.\r\n\r\nThis is a mod_security2 module for a Apache HTTP Server 2.4 on a Suse system, rpm version is apache2-mod_security2-2.9.4-150400.3.6.1.x86_64.\r\n\r\nOh, one more thing: I suspect, this UID may may somehow stick to the httpd worker child. This was tested with MPM models 'prefork' and 'event'.\r\n", + "summary": "**Fix Unique Transaction ID Issue in ModSecurity**\n\n1. Identify the root cause of non-unique transaction IDs in the ModSecurity logs when using `SecAuditLogType Concurrent`.\n\n2. Verify the configuration settings:\n - Ensure `SecAuditLogType Concurrent` is correctly set.\n - Check `SecAuditLogStorageDir /opt/modsecurity/var/audit/`.\n\n3. Analyze the audit log files:\n - Use the provided grep command to list all unique transaction IDs and confirm duplicates.\n - Investigate the UIDs like `e3bdce7c` and `7253ec42` to understand their association with multiple requests.\n\n4. Assess potential issues with the MPM models:\n - Test with both 'prefork' and 'event' models, as the issue may relate to how the HTTP server handles worker threads and their respective transaction IDs.\n\n5. Review the ModSecurity source code:\n - Focus on the logic that generates and assigns unique transaction IDs. 
Ensure it accounts for concurrent request handling.\n\n6. Implement a solution:\n - Modify the transaction ID generation logic to guarantee uniqueness across concurrent requests.\n - Consider adding a timestamp or a unique identifier based on the worker processing the request.\n\n7. Test the solution:\n - Rerun the tests that previously resulted in duplicate IDs.\n - Check the audit logs to confirm that each request now has a unique transaction ID.\n\n8. Document the findings and modifications:\n - Update the issue tracker with detailed findings and steps taken.\n - Provide examples showing the successful uniqueness of transaction IDs post-fix.\n\nBy following these steps, ensure that each transaction ID is unique, thereby allowing proper reassembly of request log data for further analysis.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2896", @@ -62200,8 +64365,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 403 + 403, + 398 ] } }, @@ -62214,6 +64379,7 @@ "node_id": "I_kwDOABQmks5p4MAx", "title": "NEW FEATURE: GraphQL Security", "body": "ModSecurity should include a new feature to parse graphQL queries. Nowadays, many big companies are using graphQL. It involves complex configurations that may expose the applications to various security vulnerabilities, such as, DoS Attacks, Injection Attacks, Introspection Queries (which can expose sensitive data), or other malicious queries. \r\n\r\nModSecurity should provide native parsing of GraphQL requests and enforces security checks to protect against these attacks.", + "summary": "Implement a new feature in ModSecurity to support GraphQL request parsing. Recognize the growing use of GraphQL by major companies and the associated security risks, including DoS attacks, injection attacks, and introspection queries that can reveal sensitive information.\n\nTo tackle this problem, follow these steps:\n\n1. **Research GraphQL Specification**: Familiarize yourself with the GraphQL query language and its structure to understand how to effectively parse requests.\n\n2. **Design Parsing Logic**: Develop a robust parsing mechanism that can handle the intricacies of GraphQL queries, including nested queries and fragments.\n\n3. **Define Security Rules**: Create a set of security rules specifically for GraphQL to mitigate risks such as malicious queries and data exposure.\n\n4. **Integrate with Existing ModSecurity Framework**: Ensure that the new GraphQL parsing feature integrates seamlessly with ModSecurity’s existing architecture.\n\n5. **Test with Various Scenarios**: Implement unit tests and scenarios to validate the effectiveness of the GraphQL security checks against known vulnerabilities.\n\n6. **Document the Feature**: Update ModSecurity documentation to include information on how to use the new GraphQL security feature and best practices for configuration.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2919", @@ -62242,6 +64408,7 @@ "node_id": "I_kwDOABQmks5rQKOv", "title": "AuditLog JSON large data field", "body": "We have Nginx(1.22.1) + ModSecurity-nginx(1.0.3) + ModSecurity(3090100) + OWASP_CRS(3.3.4). And the Rule that works on the contents of the file. 
\r\nConfiguration contain `SecAuditLogParts ABIJDEFHZ` and `SecAuditLogFormat JSON`.\r\n\r\nRule:\r\n`SecRule ARGS_NAMES|ARGS|REQUEST_BODY|XML:/* \"@rx (?:get|post|head|options|connect|put|delete|trace|track|patch|propfind|propatch|mkcol|copy|move|lock|unlock)\\s+(?:\\/|\\w)[^\\s]*(?:\\s+http\\/\\d|[\\r\\n])\" \\ \"id:921110,\\ phase:2,\\ block,\\ capture,\\ t:none,t:urlDecodeUni,t:htmlEntityDecode,t:lowercase,\\ msg:'HTTP Request Smuggling Attack',\\ logdata:'Matched Data: %{TX.0} found within %{MATCHED_VAR_NAME}: %{MATCHED_VAR}',\\ tag:'application-multi',\\ tag:'language-multi',\\ tag:'platform-multi',\\ tag:'attack-protocol',\\ tag:'paranoia-level/1',\\ tag:'OWASP_CRS',\\ tag:'capec/1000/210/272/220/33',\\ ctl:auditLogParts=+E,\\ ver:'OWASP_CRS/3.3.4',\\ severity:'CRITICAL',\\ setvar:'tx.http_violation_score=+%{tx.critical_anomaly_score}',\\ setvar:'tx.anomaly_score_pl1=+%{tx.critical_anomaly_score}'\"`\r\n\r\nIf we send a **large** file, JSON AuditLog contain full request body in `transaction.messages.details.data`.\r\nIf change the log to a native, then the data field is automatically trimmed and becomes short.\r\n\r\nWe expect to get the same short log in the Json format as in the Native. Otherwise it takes up a lot of space and we can't send it to SIEM or database(via Vector) without transforms.\r\n\r\nIs there any way to bypass this and force MATCHED_VAR to be trimmed for the data field for Json the same as for Native? or some alternative for `SecAuditLogParts I` to exclude files from message?\r\n\r\n
Screenshot part of Json Log\r\n\r\n![image](https://github.com/SpiderLabs/ModSecurity/assets/24451720/ab6c4567-d5bf-4840-8ff7-c60b072f97f6)\r\n\r\n\r\n\r\n\r\n
Full Native Log\r\n\r\n```\r\n---flqU0CRG---A--\r\n[11/Jul/2023:10:35:31 +0000] 168907173111.651220 xxx.xxx.xxx.xxx 53019 xxx.xxx.xxx.xxx 80\r\n---flqU0CRG---B--\r\nPOST / HTTP/1.1\r\nAccept: */*\r\nAccept-Language: ru-RU,ru;q=0.9\r\nContent-Length: 2600222\r\nHost: test.com\r\nUser-Agent: PostmanRuntime/7.32.3\r\nPostman-Token: 0be4a82e-1059-47f5-a5ce-5f9fb21a2121\r\nCache-Control: no-cache\r\nConnection: keep-alive\r\nAccept-Encoding: gzip, deflate, br\r\nContent-Type: multipart/form-data; boundary=--------------------------131585514483542585006929\r\n\r\n---flqU0CRG---D--\r\n\r\n---flqU0CRG---E--\r\n\\x0d\\x0a403 Forbidden\\x0d\\x0a\\x0d\\x0a

403 Forbidden\\x0d\\x0anginx
\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\r\n\r\n---flqU0CRG---F--\r\nHTTP/1.1 403\r\nStrict-Transport-Security: max-age=31536000; includeSubDomains\r\nmime.types: \r\nDate: Tue, 11 Jul 2023 10:35:31 GMT\r\nX-XSS-Protection: 1; mode=block\r\nConnection: keep-alive\r\nX-Content-Type-Options: nosniff\r\nContent-Type: text/html\r\nContent-Length: 146\r\ninclude: \r\nServer: \r\nServer: \r\nContent-Security-Policy: frame-ancestors 'self'\r\n\r\n---flqU0CRG---H--\r\nModSecurity: Access denied with code 403 (phase 2). Matched \"Operator `Rx' with parameter `(?:get|post|head|options|connect|put|delete|trace|track|patch|propfind|propatch|mkcol|copy|move|lock|unlock)\\s+(?:\\/|\\w)[^\\s]*(?:\\s+http\\/\\d|[\\r\\n])' against variable `REQUEST_BODY' (Value: `----------------------------131585514483542585006929\\x0d\\x0aContent-Disposition: form-data; name=\"\"; (3200152 characters omitted)' ) [file \"/etc/nginx/conf/modsecurity/rules/OWASP_3.3.4/REQUEST-921-PROTOCOL-ATTACK.conf\"] [line \"34\"] [id \"921110\"] [rev \"\"] [msg \"HTTP Request Smuggling Attack\"] [data \"Matched Data: get dg==\\x0d found within REQUEST_BODY: ----------------------------131585514483542585006929\\x0d\\x0acontent-disposition: form-data; name=\\x22\\x22; filename=\\x2210320000001.bson\\x22\\x0d\\x0acontent-type: application (2600073 characters omitted)\"] [severity \"2\"] [ver \"OWASP_CRS/3.3.4\"] [maturity \"0\"] [accuracy \"0\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-protocol\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/210/272/220/33\"] [hostname \"xxx.xxx.xxx.xxx\"] [uri \"/\"] [unique_id \"168907173111.651220\"] [ref \"o749684,9v364,2600222t:urlDecodeUni,t:htmlEntityDecode,t:lowercase\"]\r\n\r\n---flqU0CRG---I--\r\n\r\n---flqU0CRG---J--\r\n\r\n---flqU0CRG---Z--\r\n\r\n\r\n```\r\n
\r\n\r\n", + "summary": "Implement a solution to trim large data fields in JSON AuditLogs generated by ModSecurity when dealing with large request bodies. The current configuration outputs the full request body in `transaction.messages.details.data`, leading to excessively large log files that are impractical for SIEM or database integration.\n\n### Steps to Address the Issue:\n\n1. **Investigate Log Formatting**: Review the existing configuration options for `SecAuditLogParts` to determine if there are alternative flags that can be used to exclude large request bodies or truncate them.\n\n2. **Use Alternative Log Parts**: Explore using alternative log parts for JSON that may allow you to skip over large data fields. Specifically, consider the implications of using `ctl:auditLogParts` to modify the log output dynamically.\n\n3. **Modify the Rule**: Adjust the existing ModSecurity rule to limit the size of the data captured in `MATCHED_VAR`. Utilize transformations like `t:trim` if available, or create a custom transformation to truncate the data to a manageable size.\n\n4. **Benchmark with Native Logs**: Compare the output of native logs with JSON logs to identify specific data fields that are truncated in native logs but not in JSON. This may provide clues on how to manipulate the JSON output.\n\n5. **Test Changes in a Staging Environment**: Before deploying changes to production, implement the modifications in a controlled staging environment to ensure they behave as expected without introducing new issues.\n\n6. **Document Findings**: Keep a record of the changes made and their effects on log size and content to facilitate future troubleshooting and adjustments.\n\nBy following these steps, aim to achieve a JSON output that mirrors the brevity of native logs while maintaining necessary logging functionality.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2925", @@ -62257,8 +64424,8 @@ "repository": 1213, "assignees": [], "labels": [ - 399, - 404 + 404, + 399 ] } }, @@ -62271,6 +64438,7 @@ "node_id": "I_kwDOABQmks5rU1yW", "title": "Regex in setvar variables", "body": "**Context:**\r\nCurrently, (unescaped) backslashes are forbidden in variables content.\r\nIn msre_parse_generic():\r\n```\r\nif (*p == '\\\\') {\r\n if ((*(p + 1) == '\\0') || ((*(p + 1) != '\\'') && (*(p + 1) != '\\\\'))) {\r\n [error & return]\r\n }\r\n p++;\r\n```\r\nThe only case a backslash is accepted is when it escapes a single quote or a backslash.\r\n\r\n**Problem:**\r\nYou cannot store a regex in a variable: _setvar:'tx.var=\\babc\\b'_\r\nWhen doing this manually, you can escape the string obviously: _setvar:'tx.var=\\\\babc\\\\b'_\r\nBut when you use a macro like the following:\r\n```\r\n\r\n SecAction ...,setver:'var=$regex',...\r\n SecRule ARG \"$regex\" ...\r\n\r\n```\r\nthere's no way to call the macro with a parameter compatible with both directives\r\n\r\n**Solution:**\r\nWe could be lax (and still compatible with the current behaviour):\r\n- If it's followed by a quote or a backslash, accept and \"eats\" the escaping backslash\r\n- In all other cases, accept and don't \"eat\" the backslash\r\n- the code change is trivial\r\n\r\nDoes somebody see any other solution?", + "summary": "Implement support for regex in setvar variables by modifying the msre_parse_generic() function to allow backslashes in specific contexts. \n\n**Technical Details:**\n1. 
Update the conditional check in msre_parse_generic() to:\n - Accept a backslash if followed by a single quote or another backslash (current behavior).\n - Accept a backslash in all other cases without \"eating\" it.\n \n2. Modify the parsing logic to handle these cases correctly by adjusting the pointer accordingly.\n\n**First Steps:**\n1. Review the current implementation of msre_parse_generic() to identify the exact location and logic that restricts backslashes.\n2. Implement the proposed changes to the backslash handling logic.\n3. Test the implementation with various regex inputs in setvar to ensure both the new and existing behaviors function as expected.\n4. Consider adding unit tests to cover edge cases, including regex patterns with multiple backslashes and quotes.\n5. Document the changes made in a clear manner for future reference and clarity in the codebase.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2927", @@ -62300,6 +64468,7 @@ "node_id": "I_kwDOABQmks5rWiFx", "title": "SecRuleUpdateActionById may not replace non-disruptive, non-additive actions", "body": "SecRuleUpdateActionById can successfully be used to:\r\n- add actions where multiple of the same action is allowed in a rule (e.g. setvar)\r\n- overwrite disruptive actions (e.g. change 'deny' to 'pass')\r\n- add a non-disruptive, non-additive action when that action did not already exist in the rule\r\n\r\nHowever, existing actions that are neither a replacement for a 'disruptive' action nor additive to previous actions do not get replaced as expected. E.g. with:\r\n```\r\n#SecRule ARGS \"@rx 000\" \"id:98001,phase:2,deny,status:403,msg:'abc'\"\r\n#SecRuleUpdateActionById 98001 \"msg:'def'\"\r\n```\r\n... the msg content 'abc' is what will appear in the audit log.\r\n\r\nIn ModSecurity v2, the substitute text ('def') would appear in the log.\r\n", + "summary": "Update SecRuleUpdateActionById to allow replacement of non-disruptive, non-additive actions in rules. Currently, it successfully adds actions for rules that permit multiple instances (like setvar), overwrites disruptive actions (changing 'deny' to 'pass'), and adds non-disruptive actions only if they do not already exist in the rule. However, it fails to replace existing non-disruptive messages, resulting in the original message being logged instead of the updated one.\n\nTo tackle this problem, follow these steps:\n\n1. Review the implementation of SecRuleUpdateActionById to identify conditions where non-disruptive actions are ignored.\n2. Modify the logic to check for existing non-disruptive actions and ensure they can be replaced with new values.\n3. Test the changes to confirm that the new message appears in the audit log as expected.\n4. 
Create unit tests to cover scenarios involving non-disruptive action replacements to prevent future regressions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2928", @@ -62328,6 +64497,7 @@ "node_id": "I_kwDOABQmks5sFme0", "title": "Target transformed even when ignored via ctl: action", "body": "v2 master\r\n\r\nWhen ignoring some targets, like a huge ARG, it's not matched but still transformed.\r\nIn some cases (several big ARGS), it can have dramatic impacts on performance.\r\n\r\nDebug log showing the problem (ARGS:ParamToIgnore=a%2520b):\r\n```\r\n[9] fetch_target_exception: Found exception target list [ARGS:ParamToIgnore] for rule id 2000415\r\n[9] fetch_target_exception: Target ARGS:ParamToIgnore will not be processed.\r\n[4] Executing operator \"rx\" with param \"a\" against ARGS:ParamToIgnore skipped.\r\n[9] T (0) urlDecode: \"a b\"\r\n[9] T (0) htmlEntityDecode: \"a b\"\r\n[9] T (0) jsDecode: \"a b\"\r\n[9] T (0) compressWhitespace: \"a b\"\r\n[9] T (0) lowercase: \"a b\"\r\n[4] Rule returned 0.\r\n```", + "summary": "Investigate the issue of targets being transformed even when specified to be ignored via the ctl: action in the v2 master branch. The transformation of large ARGs, despite being ignored, leads to significant performance degradation.\n\n1. Review the debug logs for the parameter `ParamToIgnore` to confirm that it is indeed being ignored in processing but still undergoes transformations.\n2. Identify the specific rules that apply to the ignored targets and analyze their implementation to understand why transformations are still occurring.\n3. Check the control logic responsible for handling ignored targets to ensure it correctly bypasses all transformation operations.\n4. Consider modifying the execution flow to prevent any transformation functions (like `urlDecode`, `htmlEntityDecode`, etc.) from being called on ignored targets.\n5. Run performance benchmarks to measure the impact of the ignored parameters on overall system performance before and after implementing changes. \n\nBegin by replicating the issue in a controlled environment and gathering additional logs to better understand the execution path for ignored targets.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2932", @@ -62343,8 +64513,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 400 + 400, + 398 ] } }, @@ -62357,6 +64527,7 @@ "node_id": "I_kwDOABQmks5t9ju_", "title": "Cross-Compatible JSON Body Exclusion Ruleset for Different ModSecurity Versions", "body": "Hello everyone,\r\n\r\n**Description:**\r\nI've come across an issue while working with different versions of ModSecurity (v2 and v3) and their respective variable names when parsing JSON bodies using the JSON body processor.\r\n\r\n**Problem:**\r\nThe crux of the problem lies in the fact that v2 and v3 of ModSecurity employ distinct variable names during JSON body parsing. This discrepancy becomes particularly challenging when attempting to create an exclusion ruleset that works seamlessly across all ModSecurity versions. Currently, the only two viable options are duplicating `ctl:remove*` actions or duplicating rules. 
However, both of these alternatives demand more CPU consumption than necessary (other than possible -still unknown- security implications/bypass).\r\n\r\n**Example:**\r\nTo illustrate this issue further, consider the following JSON body:\r\n```\r\n{\"foo\": [{\"bar\":\"payload\"}]}\r\n```\r\nIn v2, the corresponding variable name would be: `ARGS:foo.foo.bar`\r\nWhile in v3, it becomes: `ARGS:json.foo.array_0.bar`\r\n\r\n**If I'm not wrong**, this means that I need a rule exclusion like:\r\n```\r\nSecRule REQUEST_URI \"@beginsWith /mypath\" \"id:1234,\\\r\n ctl:ruleRemoveTargetByTag=OWASP_CRS;ARGS:foo.foo.bar,\\\r\n ctl:ruleRemoveTargetByTag=OWASP_CRS;ARGS:json.foo.array_0.bar,\\\r\n ...\"\r\n```\r\n\r\nIf the array within the \"foo\" object in my example comprises 100 entries, it's quite clear that this would necessitate either 200 ctl actions or two distinct rules.\r\n\r\nAre there other ways to do this?\r\n\r\nIf not: **Proposed Solution**\r\n- add an `ARGS:json` variable like v3 on v2\r\n- abstract the ARGS names on rule definition in order to have only 1 syntax\r\n- use JSON path syntax like: `$.foo[0].bar` or `ARGS:json.foo[0].bar` and would be awesome an `ARGS:json.foo[].bar` to select all entries of the \"foo\" array\r\n", + "summary": "Implement a cross-compatible JSON body exclusion ruleset for ModSecurity v2 and v3. Address the variable name discrepancies that complicate JSON body parsing in both versions. \n\n1. **Identify the Problem**: Note that ModSecurity v2 uses `ARGS:foo.foo.bar` for JSON payloads, while v3 uses `ARGS:json.foo.array_0.bar`. This difference requires duplicating exclusion actions or rules, leading to unnecessary CPU overhead.\n\n2. **Analyze Example**: Use the JSON body `{\"foo\": [{\"bar\":\"payload\"}]}` to illustrate the issue. Recognize that a rule must handle multiple entries in the \"foo\" array, potentially doubling the exclusion actions required.\n\n3. **Propose Solutions**:\n - Introduce a unified variable name such as `ARGS:json` in v2 to mirror v3's structure.\n - Allow abstraction in rule definitions to standardize syntax across versions.\n - Implement JSON path syntax for rule targeting, e.g., `$.foo[0].bar` or `ARGS:json.foo[].bar` to facilitate selecting all entries in the \"foo\" array.\n\n4. **First Steps**:\n - Review the current ModSecurity documentation for both versions to confirm variable name behaviors.\n - Draft a proposal for modifications to the ModSecurity codebase or configuration files to support the suggested changes.\n - Test the proposed syntax with structured JSON payloads to ensure compatibility and efficiency. \n\nBy taking these steps, streamline the exclusion ruleset for JSON body parsing across ModSecurity versions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2942", @@ -62386,6 +64557,7 @@ "node_id": "I_kwDOABQmks5u1eZ3", "title": "verifySSN: Area code can be larger than 740", "body": "The United States Social Security Number area code used to startwith a number less than 740 bt this is no longer the case:\r\n\r\nhttps://www.ssa.gov/history/ssn/geocard.html\r\n\r\nThere are now routinely SSNs with area code larger than 740.\r\n\r\nhttps://github.com/SpiderLabs/ModSecurity/blob/60f802e4801c8a4fee8e2caac90462e53651971f/src/operators/verify_ssn.cc#L103", + "summary": "Update the `verifySSN` function to accommodate Social Security Numbers (SSNs) with area codes larger than 740. \n\n1. 
**Review the current implementation**: Examine the code at `src/operators/verify_ssn.cc` around line 103 to understand how area codes are currently validated.\n\n2. **Modify the validation logic**: Adjust the condition that checks the area code to allow for numbers greater than or equal to 740.\n\n3. **Consult SSA documentation**: Refer to the provided SSA link for any additional context or updates regarding SSN structure and area codes.\n\n4. **Implement tests**: Create unit tests to ensure the new logic correctly validates SSNs with area codes above 740 while still rejecting invalid formats.\n\n5. **Submit a pull request**: After testing, submit the changes for review to integrate the updated validation logic into the main codebase.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2970", @@ -62412,6 +64584,7 @@ "node_id": "I_kwDOABQmks5vIhjX", "title": "PCRE2 performance", "body": "Hello,\r\nDid anybody test the performance of using PCRE2?\r\nOn Rocky 9, I have a loss of 30% when compiling with pcre2 (with pcre-jit & pcre-study). On Windows it's much slower as well (cannot measure it precisely).\r\nDoes somebody have different results?\r\nThanks\r\n\r\nPS: could this be linked to https://github.com/SpiderLabs/ModSecurity/issues/2966 ?\r\n", + "summary": "Test the performance of PCRE2 on Rocky 9 and Windows. Note significant performance loss, approximately 30% on Rocky 9 when compiling with pcre2, including pcre-jit and pcre-study. Observe that Windows performance is also notably slower, though precise measurements are unclear.\n\nInvestigate whether these performance issues link to the issue reported in ModSecurity (#2966). \n\nFirst steps to tackle the problem:\n1. Run benchmark tests comparing PCRE2 with other regex libraries on both Rocky 9 and Windows.\n2. Profile the compilation process to identify bottlenecks when using PCRE2.\n3. Review ModSecurity issue #2966 for relevant insights or shared experiences.\n4. Consider experimenting with different PCRE2 compilation flags or configurations to optimize performance.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2972", @@ -62427,8 +64600,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, 403, + 398, 415 ] } @@ -62442,6 +64615,7 @@ "node_id": "I_kwDOABQmks5vIvpP", "title": "Invalid PCRE2 JiT check", "body": "In msc_pcre.c, we have the following code at several places:\r\n```\r\n#ifdef WITH_PCRE_STUDY\r\n #ifdef WITH_PCRE_JIT\r\n int jit;\r\n #endif\r\n#endif\r\n[...]\r\n#ifdef WITH_PCRE_STUDY\r\n #ifdef WITH_PCRE_JIT\r\n if (msr->txcfg->debuglog_level >= 4) {\r\n #ifdef WITH_PCRE2\r\n rc = regex->jit_compile_rc;\r\n #else\r\n rc = msc_fullinfo(regex, PCRE_INFO_JIT, &jit);\r\n #endif\r\n if ((rc != 0) || (jit != 1)) {\r\n *error_msg = apr_psprintf(msr->mp,\r\n \"Rule %pp [id \\\"%s\\\"][file \\\"%s\\\"][line \\\"%d\\\"] - \"\r\n \"Execution error - \"\r\n \"Does not support JIT (%d)\",\r\n rule,((rule->actionset != NULL)&&((rule->actionset->id != NULL)&&\r\n (rule->actionset->id != NOT_SET_P))) ? rule->actionset->id : \"-\",\r\n rule->filename != NULL ? 
rule->filename : \"-\",\r\n rule->line_num,rc);\r\n msr_log(msr, 4, \"%s.\", *error_msg);\r\n }\r\n }\r\n #endif\r\n#endif\r\n```\r\njit is not initialized when using PCRE2.\r\nFix: initialize jit to 1 (will potentially be changed to 0 if msc_fullinfo succeeds).\r\nBetter fix: this code is copied 7 times in this file; we should put it in a static function.\r\n\r\nQuestion: is it normal that, for PCRE2, we don't check if jit is enabled? Shouldn't we call msc_fullinfo succeeds() as well?", + "summary": "Address the invalid PCRE2 JIT check in `msc_pcre.c`. \n\n1. Identify the code sections where the JIT check is performed without initializing the `jit` variable when using PCRE2. \n2. Modify the code to ensure `jit` is initialized to `1` prior to the conditional checks. \n3. Evaluate the necessity of calling `msc_fullinfo` for PCRE2, as the current implementation lacks this check. \n4. Refactor the duplicated code (found in 7 instances) into a static function to avoid redundancy and improve maintainability.\n\nFirst steps:\n- Locate all instances of the JIT check code within `msc_pcre.c`.\n- Implement the initialization of `jit` to `1` in the relevant sections.\n- Introduce a static function that encompasses the JIT checking logic.\n- Assess and potentially implement a call to `msc_fullinfo` to verify JIT support for PCRE2.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2973", @@ -62457,8 +64631,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 400 + 400, + 398 ] } }, @@ -62471,6 +64645,7 @@ "node_id": "I_kwDOABQmks5ylzw6", "title": "IIS multiple response headers with the same name", "body": "I am using IIS Application Request Routing (ARR) with ModSecurity. ModSecurity is installed and configured with the OWASP Core rule set on Windows 2022. I am trying to improve the rule set by incorporating session based logic. \r\n\r\n\r\nI am using this rule to extract the session cookie:\r\n```\r\nSecRule RESPONSE_HEADERS:/Set-Cookie2?/ \"(?i:(wordpresspass_.*?|j?sessionid|(php)?sessid|(asp|jserv|jw)?session[-_]?(id)?|cf(id|token)|sid)=([^\\s]+)\\;\\s?)\" \\\r\n\t\"chain,phase:5,id:'981062',t:none,pass,nolog,capture, \\\r\n\tsetsid:%{TX.6}, \\\r\n\tsetvar:session.sessionid=%{TX.6}, \\\r\n\tsetvar:session.valid=1, \\\r\n\texpirevar:session.valid=3600, \\\r\n\tsetvar:session.country_name=%{geo.country_name}\"\r\n\tSecRule UNIQUE_ID \"(.*)\" \"chain,t:none,t:sha1,t:hexEncode,capture, \\\r\n\t\tsetvar:session.csrf_token=%{TX.1}\"\r\n\t\tSecRule REMOTE_ADDR \"^(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.)\" \"chain,capture,setvar:session.ip_block=%{tx.1}\"\r\n\t\t\tSecRule REQUEST_HEADERS:User-Agent \".*\" \"t:none,t:sha1,t:hexEncode,setvar:session.ua=%{matched_var}\"\r\n```\r\n\r\nThe problem is, only the last Set-Cookie set by the application is found. 
I wrote a rule to help me determine how many Set-Cookie headers ModSecurity thinks I have by : \r\n```\r\nSecRule &RESPONSE_HEADERS:/Set-Cookie/ \"@gt 1\" \"chain,phase:5,id:'981062',t:none,pass,nolog\" \r\n\tSecRule RESPONSE_HEADERS:/Set-Cookie2?/ \"((?s).*)\" \\\r\n\t\t\"t:none,nolog,capture, \\\r\n\t\tsetsid:%{TX.1}, \\\r\n\t\tsetvar:session.sessionid=%{TX.1}, \\\r\n\t\tsetvar:session.valid=1\"\r\n```\r\nWhen greater than 1 (“@gt 1”) is set, the session file never gets created, but if it “@gt 0” the last Set-Cookie header is saved.\r\n\r\n \r\nThis https://coreruleset.org/20230717/cve-2023-38199-multiple-content-type-headers/\r\nStates: \r\n\r\n\r\n“This issue was initially reported to the CRS project on 24 March 2023 via the ModSecurity project. We quickly established that the CRS reference platform was not affected (ModSecurity 2.9.x on Apache 2.4). This is because the Apache web server merges identically titled header fields into one, separating any different values with commas (as described in [the HTTP standard](https://httpwg.org/specs/rfc9110.html#field.lines)). The problem is that the situation is different on other platforms. While Apache (our reference platform) merges Content-Type headers together allowing this behavior to be detected, Nginx, for example, will retain each separate, identically named header: no merging occurs.”\r\n\r\nThis states: https://learn.microsoft.com/en-us/answers/questions/555576/setting-multiple-response-headers-with-same-name-d “IIS doesn't allow custom response headers with the same name.”\r\n\r\nChrome at the end user, does show all 4 Set-Cookie headers in Developer Tools.\r\n\r\nWhat IIS, ARR and/or ModSecurity configuration setting will allow ModSecurity to see all Set-Cookie headers?", + "summary": "Investigate the issue of IIS not handling multiple response headers with the same name, particularly for Set-Cookie headers. \n\n1. Review the current IIS Application Request Routing (ARR) and ModSecurity configurations on Windows 2022.\n2. Confirm the behavior of IIS regarding multiple headers, referencing documentation that states IIS does not allow custom response headers with duplicate names.\n3. Analyze the ModSecurity rules and their effectiveness in capturing multiple Set-Cookie headers. \n4. Test the current setup with modifications to the rules to see if capturing mechanisms can be adjusted. For instance, consider using:\n - `SecRule RESPONSE_HEADERS:/Set-Cookie/` to capture all Set-Cookie headers instead of just the last one.\n5. Explore potential IIS settings or ARR modules that might aggregate or rewrite headers, and look into enabling header merging.\n6. Consult the OWASP Core Rule Set documentation for any updates or patches addressing this issue.\n7. If necessary, consider altering the application logic to ensure unique Set-Cookie headers or investigate the possibility of using a different web server that supports this functionality.\n\nBegin with logging all response headers to verify their presence and understand if the issue lies in header visibility or ModSecurity rule execution.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/2991", @@ -62497,6 +64672,7 @@ "node_id": "I_kwDOABQmks51V8ZM", "title": "408 http code generated by mod_reqtimeout is not detected", "body": "**Describe the bug**\r\n\r\n408 http code is not detected\r\n\r\n**Logs and dumps**\r\n1. 
SecDebugLog level 9:\r\nempty\r\n\r\nIf you change rule from \"@streq 408\" to \"@streq 200\" (for TEST PURPOSES ONLY), it blocks **correctly** and fill the SecDebugLog(SecDebugLog = 9) with informations. If you leave \"@streq 408\", debug log is empty and not blocking.\r\n\r\n2. Apache error log:\r\nempty\r\n\r\n3. Output of apache access log:\r\n```\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\nXXX.XXX.XXX.XXX - - [30/Oct/2023:16:45:33 +0100] \"GET / HTTP/1.1\" 408 448\r\n```\r\n\r\n**To Reproduce**\r\n\r\n1. Install Ubuntu + apache + reqtimeout + mod security\r\n2. Use rules below\r\n3. Make slowloris attack from server to server:\r\n`wget https://raw.githubusercontent.com/GHubgenius/slowloris.pl/master/slowloris.pl && perl slowloris.pl -dns SERVER_IP -port 80 -timeout 3 -num 750`\r\n4. You will see, apache 408 in access.log, but modsecurity do nothing.\r\n\r\nIf you change rule detection \"408\" to \"200\", it is working (blocking). So, there is any problem with http code 408. Not with the rule itself.\r\n\r\n\r\n\r\n\r\n**Expected behavior**\r\n\r\nDetect 408 http code correctly and block attacker\r\n\r\n**Server (please complete the following information):**\r\nUbuntu 20 TLS (Ubuntu 16 and 18 same problem)\r\nApache/2.4.58 (Ubuntu) + mod_reqtimeout (lower Apache 2.4.x versions same problem)\r\nlibapache2-mod-security2 2.9.3-1ubuntu0.1 (2.9.2 and 2.9.0 same problem)\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\n```\r\nSecRule RESPONSE_STATUS \"@streq 408\" \"id:20,phase:5,t:none,nolog,pass,setvar:ip.slow_dos_counter=+1,expirevar:ip.slow_dos_counter=60,ctl:ruleEngine=On\"\r\nSecRule IP:SLOW_DOS_COUNTER \"@gt 5\" \"id:21,phase:1,t:none,log,deny,status:403,msg:'SlowlorisAttack',ctl:ruleEngine=On\"\r\n```\r\n\r\n**Additional context**\r\n\r\n\r\n", + "summary": "Detect and resolve the issue where the HTTP 408 status code is not being logged or blocked by ModSecurity.\n\n1. **Analyze the logs**: Check the `SecDebugLog` at level 9, which is empty when checking for 408. Confirm that the Apache error log is also empty and verify that the access log shows multiple 408 entries.\n\n2. **Test rule functionality**: Change the rule from `@streq 408` to `@streq 200` for testing purposes. This will confirm that the rule structure and logging mechanisms are functioning correctly since blocking will work in this case.\n\n3. **Reproduce the issue**:\n - Set up the environment: Install Ubuntu, Apache, ModSecurity, and mod_reqtimeout.\n - Deploy the specified rules.\n - Simulate a Slowloris attack using the provided `slowloris.pl` script to generate 408 responses.\n\n4. **Expected behavior**: Ensure that the ModSecurity rules can detect the 408 status and block attackers as intended.\n\n5. **First steps to tackle the problem**:\n - Investigate why 408 responses are not triggering the rule. 
Review ModSecurity configurations and compatibility with Apache and mod_reqtimeout.\n - Consider updating ModSecurity to a newer version if available, or check for patches that address this issue.\n - Test with different logging levels and configurations to identify any discrepancies in rule processing.\n\nBy following these steps, you should be able to diagnose and potentially resolve the issue with detecting HTTP 408 status codes within ModSecurity.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3007", @@ -62512,8 +64688,8 @@ "repository": 1213, "assignees": [], "labels": [ - 398, - 403 + 403, + 398 ] } }, @@ -62526,6 +64702,7 @@ "node_id": "I_kwDOABQmks51cV4m", "title": "Performance enhancement calling expand_macros()", "body": "Hello,\r\nI have a (very simple) proposal to optimize a portion of the code that is used more than 40 times in the project:\r\n```\r\nmsc_string *str = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string));\r\nstr->value = (char *)input_string;\r\nstr->value_len = strlen(str->value);\r\nexpand_macros(msr, str, rule, msr->mp);\r\noutput_string = str->value;\r\n```\r\n\r\nWe could optimize that code by first checking that input_string contains a '%' (or even \"%{\"). If not, no need to allocate the memory, call the function, etc. We add a (very) little overhead for the check, but avoid a much bigger overhead in most of the cases where the value doesn't contain a macro.\r\nThat could be done in a wrapper function (the name could be adapted), that would simplify the code as well:\r\n```\r\nchar* expand_macros_wrapper(modsec_rec *msr, char *var, msre_rule *rule, apr_pool_t *mptmp) {\r\n if (!strstr(var, \"%{\")) return var;\r\n msc_string *str = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string));\r\n str->value = var;\r\n str->value_len = strlen(str->value);\r\n int changed = expand_macros(msr, str, rule, msr->mp);\r\nif (!changed) return var; // Does this line add anything? Should we always return str->value?\r\n return str->value;\r\n}\r\n```\r\n\r\nThe calling code would simply be:\r\n`output_string = expand_macros_wrapper(msr, input_string, rule, msr->mp);`\r\n\r\nAny remark?", + "summary": "Optimize the `expand_macros()` function by introducing a wrapper function to reduce unnecessary memory allocation and function calls. \n\n1. **Check for Macros**: Implement a check within the wrapper function `expand_macros_wrapper()` to see if `input_string` contains a `%` or `%{`. If not, return the original string immediately.\n\n2. **Create Wrapper Function**: Define `expand_macros_wrapper(modsec_rec *msr, char *var, msre_rule *rule, apr_pool_t *mptmp)` that performs the check and only proceeds with memory allocation and calling `expand_macros()` if necessary.\n\n ```c\n char* expand_macros_wrapper(modsec_rec *msr, char *var, msre_rule *rule, apr_pool_t *mptmp) {\n if (!strstr(var, \"%{\")) return var;\n msc_string *str = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string));\n str->value = var;\n str->value_len = strlen(str->value);\n int changed = expand_macros(msr, str, rule, msr->mp);\n if (!changed) return var; // Consider if this line is necessary\n return str->value;\n }\n ```\n\n3. **Update Calling Code**: Modify the code that calls `expand_macros()` to use `expand_macros_wrapper()` instead. For example:\n ```c\n output_string = expand_macros_wrapper(msr, input_string, rule, msr->mp);\n ```\n\n4. **Evaluate Return Logic**: Assess the necessity of returning `var` when `changed` is false. 
Decide if always returning `str->value` is more appropriate.\n\n5. **Testing**: Write unit tests to ensure the wrapper function behaves correctly, especially under conditions where macros are and aren't present.\n\n6. **Performance Measurement**: Measure performance improvements before and after implementing the changes to validate the optimization impact. \n\nBy following these steps, effectively reduce overhead in cases where macros are not present, thus enhancing the overall performance of the application.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3010", @@ -62555,6 +64732,7 @@ "node_id": "I_kwDOABQmks54mdYJ", "title": "SecRequestBodyAccess Off is still loading request body in memory", "body": "Hello!\r\nI'm trying to setup ModSecurity but I'm dealing with issues when uploading large files.\r\n\r\nAt first I had issues uploading files so I set `SecRequestBodyAccess` to `Off`, which is working fine for files up to ~800MB but with larger files (1.3GB), nginx is still crashing.\r\n\r\n**Logs and dumps**\r\n\r\nOutput of:\r\n1. Error logs\r\n```txt\r\nterminate called after throwing an instance of 'std::bad_alloc'\r\n what(): std::bad_alloc\r\n[alert] 18607#18607: worker process 18608 exited on signal 6 (core dumped)\r\n```\r\n 2. Backtrace of core dump\r\n```txt\r\n#0 __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:50\r\n#1 0x00007f7bd79ac535 in __GI_abort () at abort.c:79\r\n#2 0x00007f7bd70dc983 in ?? () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6\r\n#3 0x00007f7bd70e28c6 in ?? () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6\r\n#4 0x00007f7bd70e2901 in std::terminate() () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6\r\n#5 0x00007f7bd70e2b89 in __cxa_rethrow () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6\r\n#6 0x00007f7bd7834d84 in std::__cxx11::basic_string, std::allocator >::_M_construct > > (this=this@entry=0x7ffc4a54a280, __beg=..., __end=...) 
at /usr/include/c++/8/ext/new_allocator.h:116\r\n#7 0x00007f7bd7840489 in std::__cxx11::basic_string, std::allocator >::_M_construct_aux > > (__end=..., __beg=..., this=0x7ffc4a54a280) at /usr/include/c++/8/bits/basic_string.h:252\r\n#8 std::__cxx11::basic_string, std::allocator >::_M_construct > > (\r\n __end=..., __beg=..., this=0x7ffc4a54a280) at /usr/include/c++/8/bits/basic_string.h:255\r\n#9 std::__cxx11::basic_string, std::allocator >::basic_string >, void> (__a=..., __end=..., __beg=..., this=0x7ffc4a54a280) at /usr/include/c++/8/bits/basic_string.h:617\r\n#10 std::__cxx11::basic_string, std::allocator >::_M_replace_dispatch > > (__k2=..., __k1=..., __i2=..., __i1=0 '\\000', this=0x7ffc4a54a1e0) at /usr/include/c++/8/bits/basic_string.tcc:384\r\n#11 std::__cxx11::basic_string, std::allocator >::replace >, void> (\r\n __k2=..., __k1=..., __i2=..., __i1=..., this=0x7ffc4a54a1e0) at /usr/include/c++/8/bits/basic_string.h:2091\r\n#12 std::__cxx11::basic_string, std::allocator >::assign >, void> (\r\n __last=..., __first=..., this=0x7ffc4a54a1e0) at /usr/include/c++/8/bits/basic_string.h:1471\r\n#13 modsecurity::Transaction::requestBodyFromFile (this=0x5642022075d0, path=) at transaction.cc:1032\r\n#14 0x00007f7bd7fdbe5c in ngx_http_modsecurity_pre_access_handler (r=0x564202279dc0)\r\n at /usr/local/src/ModSecurity-nginx/src/ngx_http_modsecurity_pre_access.c:170\r\n#15 0x00005641fe7d512f in ngx_http_core_generic_phase ()\r\n#16 0x00005641fe7d0bdd in ngx_http_core_run_phases ()\r\n#17 0x00007f7bd7fdbd66 in ngx_http_modsecurity_request_read (r=) at /usr/local/src/ModSecurity-nginx/src/ngx_http_modsecurity_pre_access.c:38\r\n#18 0x00005641fe80ac33 in ?? ()\r\n#19 0x00005641fe80d980 in ?? ()\r\n#20 0x00005641fe7d9d1c in ?? ()\r\n#21 0x00005641fe7b8810 in ngx_event_process_posted ()\r\n#22 0x00005641fe7bff71 in ?? ()\r\n#23 0x00005641fe7be84b in ngx_spawn_process ()\r\n#24 0x00005641fe7c0374 in ?? ()\r\n#25 0x00005641fe7c0bd7 in ngx_master_process_cycle ()\r\n#26 0x00005641fe797588 in main ()\r\n```\r\n\r\n**Expected behavior**\r\n\r\nI expected body not to be loaded in memory as `SecRequestBodyAccess` is set to `Off` and `SecRequestBodyLimitAction` is set to `ProcessPartial`.\r\n\r\n**Server :**\r\n - ModSecurity version (and connector): ModSecurity v3.0.10 with nginx-connector v1.0.3\r\n - WebServer: nginx-1.22.0\r\n - OS (and distro): Debian 10 (buster)\r\n\r\n\r\n**Rule Set:**\r\nOWASP coreruleset v3.3.5\r\n\r\n**Additional context**\r\n\r\nThis issue is similar to https://github.com/SpiderLabs/ModSecurity/issues/1517\r\n\r\nWould this patch be acceptable ? I have tested it with large files and nginx does not crash with it. Thanks.\r\n```txt\r\ndiff --git a/src/transaction.cc b/src/transaction.cc\r\nindex 0c98b49c..fffeb70f 100644\r\n--- a/src/transaction.cc\r\n+++ b/src/transaction.cc\r\n@@ -1023,13 +1023,13 @@ int Transaction::requestBodyFromFile(const char *path) {\r\n request_body.seekg(0, std::ios::end);\r\n try {\r\n str.reserve(request_body.tellg());\r\n+ request_body.seekg(0, std::ios::beg);\r\n+ str.assign((std::istreambuf_iterator(request_body)),\r\n+ std::istreambuf_iterator());\r\n } catch (...) 
{\r\n ms_dbg(3, \"Failed to allocate memory to load request body.\");\r\n return false;\r\n }\r\n- request_body.seekg(0, std::ios::beg);\r\n- str.assign((std::istreambuf_iterator(request_body)),\r\n- std::istreambuf_iterator());\r\n \r\n const char *buf = str.c_str();\r\n int len = request_body.tellg();\r\n\r\n```\r\n", + "summary": "### Summarize the Issue\n\n**Resolve the issue where `SecRequestBodyAccess Off` still loads request body into memory.** \n\n### Problem Overview\nWhen configuring ModSecurity with `SecRequestBodyAccess` set to `Off`, files larger than 800MB are still causing nginx to crash due to memory allocation issues. The error logs indicate a `std::bad_alloc` exception leading to a core dump.\n\n### Key Logs\n- Error Log:\n ```\n terminate called after throwing an instance of 'std::bad_alloc'\n ```\n\n- Backtrace shows allocation failures occurring in `modsecurity::Transaction::requestBodyFromFile`.\n\n### Expected Behavior\nThe system should not load the request body into memory when `SecRequestBodyAccess` is set to `Off` and `SecRequestBodyLimitAction` is `ProcessPartial`.\n\n### Technical Details\n- **ModSecurity Version**: v3.0.10 with nginx-connector v1.0.3\n- **Nginx Version**: 1.22.0\n- **Operating System**: Debian 10 (Buster)\n- **Rule Set**: OWASP core ruleset v3.3.5\n\n### Proposed Solution\nA patch has been suggested to modify the `requestBodyFromFile` function in `transaction.cc` to prevent memory loading when `SecRequestBodyAccess` is `Off`. The patch includes changes to avoid reserving memory for the request body and directly assigning it when seeking the body.\n\n### Suggested Steps to Tackle the Problem\n1. **Review the Patch**: Analyze the provided patch to ensure it correctly implements the desired behavior without introducing new issues.\n2. **Test the Changes**: Compile the modified ModSecurity with the patch and conduct tests with large file uploads to verify stability and correctness.\n3. **Check Documentation**: Ensure that the ModSecurity documentation is updated to reflect any changes in behavior regarding request body handling.\n4. **Engage Community**: Discuss the patch with the ModSecurity community to seek feedback and approval for merging into the main codebase.\n5. **Monitor for Future Issues**: After implementing the patch, continue to monitor the system for any related issues or side effects when handling large file uploads. \n\nBy following the steps outlined, you can address the current problem effectively and prevent future occurrences.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3022", @@ -62583,6 +64761,7 @@ "node_id": "I_kwDOABQmks58F3lF", "title": "lua m.log(2, \"4444\") where is the log message?", "body": "```\r\ndocker run --privileged --rm -ti owasp/modsecurity-crs:3.3.5-nginx-alpine-202401080101\r\n\r\n/etc/modsecurity.d # grep SecRule setup.conf\r\nSecRuleScript \"/tmp/2.lua\" \"id:23333,deny\"\r\n\r\n/etc/modsecurity.d/owasp-crs # cat /tmp/2.lua\r\nfunction main()\r\n --error(\"4444\")\r\n --print(\"4444\")\r\n m.log(1,\"4444\")\r\n m.log(2,\"4444\")\r\n return nil;\r\nend\r\n```\r\n\r\nI am sure lua script have been exectued, but i checked \"console output\"、\"nginx error log\", and can not found the log message \"4444\".\r\n\r\nSo where is the log message?\r\n\r\nhttps://github.com/coreruleset/modsecurity-crs-docker/issues/185", + "summary": "Investigate the issue of missing log messages in the Lua script executed within ModSecurity. \n\n1. 
Verify the logging configuration in ModSecurity and ensure that the log level is appropriately set to capture messages logged by `m.log()`. Check the ModSecurity configuration files for directives related to logging.\n\n2. Confirm that the correct output destinations for logs are configured. Review the error log configuration in Nginx to ensure that it captures logs from ModSecurity.\n\n3. Test different log levels in the Lua script. For example, modify `m.log(1, \"4444\")` and `m.log(2, \"4444\")` to use a higher log level (e.g., `m.log(3, \"4444\")`) to determine if the messages are being filtered out.\n\n4. Examine the Docker container's logs. Use `docker logs ` to check if the messages are appearing there, as they might not be directed to the standard output or Nginx error log.\n\n5. Enable debug logging for ModSecurity if possible. This may provide additional context about script execution and logging behavior.\n\n6. Ensure that the Lua script is indeed being executed by placing additional logging statements or using error handling mechanisms to catch any runtime errors that may prevent logging.\n\n7. Review the ModSecurity and Nginx versions for compatibility issues that may affect logging behavior. Consider updating to the latest versions if necessary.\n\nBy following these steps, you should be able to identify the source of the logging issue and determine where the log messages are being sent or if they are being suppressed.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3035", @@ -62611,6 +64790,7 @@ "node_id": "I_kwDOABQmks58TCg5", "title": "Feature: Can we have a basic benchmark report graph in the README", "body": "Providing a basic benchmark report graph for performance recording.", + "summary": "Add a basic benchmark report graph to the README to visualize performance metrics. Use a graphing library such as Matplotlib or Chart.js to create the graph. \n\n1. Collect benchmark data using appropriate testing tools, such as Benchmark.js or JMH.\n2. Format the data for visualization, ensuring it accurately reflects performance over time or across different scenarios.\n3. Integrate the graph into the README by converting it to an image format (PNG, SVG) or by embedding it directly if using Markdown.\n4. Update the README documentation to explain the graph and its significance in performance analysis.\n\nBegin by determining the key performance metrics to track and establish a consistent benchmarking process.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3036", @@ -62637,6 +64817,7 @@ "node_id": "I_kwDOABQmks58wMOe", "title": "lua script can not access XML variable", "body": "**Describe the bug**\r\n\r\n`m.getvars(\"XML\")` return null\r\n\r\n**To Reproduce**\r\n\r\n1. setup modsecurity \r\n\r\n ```\r\n docker run -ti --rm -p 8083:80 -e ERRORLOG=/tmp/nginx_error.log -e MODSEC_DEBUG_LOG=/tmp/debug.log -e MODSEC_AUDIT_LOG=/tmp/audit.log -e BACKEND=http://10.56.58.13:8888 docker.io/owasp/modsecurity-crs:3.3.5-nginx-alpine-202401080101\r\n ```\r\n \r\n replace BACKEND address to yours\r\n\r\n2. edit `modsecurity.conf`\r\n\r\n```\r\n/etc/modsecurity.d # grep lua *\r\nmodsecurity.conf:SecRuleScript \"/tmp/2.lua\" \"id:23333,phase:2,deny\"\r\n```\r\n\r\n3. 
create lua script\r\n\r\n```\r\n-- File operation function\r\nfunction writeToFile(filepath, content)\r\n local file, err = io.open(filepath, \"a\") -- \"w\" stands for write mode\r\n if file == nil then\r\n error(\"Couldn't open file: \"..err)\r\n else\r\n file:write(content..\"\\n\")\r\n file:close()\r\n end\r\nend\r\n\r\n-- Test function\r\nfunction test(x)\r\n local status, err = pcall(writeToFile, \"/tmp/test.txt\", x)\r\n if not status then\r\n print(\"Error: \" .. err)\r\n end\r\nend\r\n\r\n-- Main function\r\nfunction main()\r\n local inspect = require(\"inspect\") -- The inspect library needs to be installed separately https://github.com/kikito/inspect.lua\r\n\r\n local vars = inspect(m.getvars(\"PATH_INFO\")) -- Print the PATH_INFO variable to standard error\r\n test(vars)\r\n\r\n test(\"4444\")\r\n\r\n local vars = inspect(m.getvars(\"XML\")) \r\n test(\"XML\"..vars)\r\n\r\n return nil;\r\nend\r\n```\r\n\r\n4. send http request\r\n\r\n```\r\nPOST /webtools/control/SOAPService HTTP/1.1\r\nHost: example.com\r\nUser-Agent: Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36\r\nConnection: close\r\nContent-Length: 355\r\nContent-Type: application/xml\r\nAccept-Encoding: gzip\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n```\r\n\r\n5. look up /tmp/test.log \r\n\r\n**Expected behavior**\r\n\r\nExpected behavior: /tmp/test.log has information abount XML variable\r\n\r\nActual behavior:\r\n\r\n/tmp/test.log content is below\r\n```\r\n{ {\r\n name = \"PATH_INFO\",\r\n value = \"/webtools/control/SOAPService\"\r\n } }\r\n4444\r\n```\r\n\r\nAnd some error occur\r\n```\r\n2024/01/19 08:00:42 [alert] 1#1: worker process 90735 exited on signal 6\r\nterminate called after throwing an instance of 'std::invalid_argument'\r\n what(): Variable not found.\r\n```\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): [e.g. ModSecurity v3.0.8 with nginx-connector v1.0.3]\r\n - WebServer: nginx/1.25.3\r\n - OS (and distro): linux\r\n\r\n", + "summary": "**Address the issue with XML variable access in Lua script**\n\n1. **Identify the problem**: The Lua script fails to access the XML variable using `m.getvars(\"XML\")`, returning null instead of the expected value.\n\n2. **Reproduce the error**: Follow these steps:\n - Set up ModSecurity with Docker using the provided command, ensuring to replace the BACKEND address.\n - Modify `modsecurity.conf` to include the Lua script.\n - Create the Lua script as outlined, ensuring the `inspect` library is installed.\n - Send an HTTP POST request with an XML payload.\n\n3. **Examine the output**: Check the contents of `/tmp/test.txt`. It should include information about the `PATH_INFO` variable and the value \"4444\". However, the XML variable is not logged, indicating the issue.\n\n4. **Investigate error logs**: Look for errors in the log files, particularly noting the message “Variable not found.” This suggests that ModSecurity cannot find the XML variable in the request context.\n\n5. 
**Potential first steps to resolve**:\n - Confirm the ModSecurity configuration for parsing XML requests is correctly set up.\n - Verify that the XML payload is being processed and that the variable name \"XML\" is correct.\n - Review ModSecurity documentation for any specific settings or rules required to access XML data.\n - Debug the Lua script by adding additional logging or error handling to identify where the variable access fails.\n - Check compatibility between the ModSecurity version and the Lua script features being utilized.\n\nBy following these steps, you can methodically troubleshoot and resolve the issue with XML variable access in the Lua script.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3037", @@ -62663,6 +64844,7 @@ "node_id": "I_kwDOABQmks59S3qp", "title": "Concurrent log index files do not respect SecAuditLogRelevantStatus", "body": "**Describe the bug**\r\naudit.log_0 and audit.log_1 do not contain 404s following SecAuditLogRelevantStatus being set to: `SecAuditLogRelevantStatus \"^(?:5|4)\"` in concurrent logging mode with JSON format.\r\n\r\n**Logs and dumps**\r\nAll the status codes in the log index files are as described.\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n\r\nConfigure audit logging as described and cause 404s in test server.\r\n\r\n**Expected behavior**\r\n\r\nAll concurrent log entries that match the relevant status should exist in the log index files.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity v3.0.8 with nginx-connector v1.0.3\r\n - nginx-1.22.0\r\n - Linux, ubuntu-20.04\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\n - OWASP rules 3.0.2\r\n - Some simple rules that return 404\r\n", + "summary": "**Fix Concurrent Log Index Files to Respect SecAuditLogRelevantStatus**\n\nEnsure that `audit.log_0` and `audit.log_1` accurately reflect the status codes specified by `SecAuditLogRelevantStatus`, particularly the expected 404s when configured for concurrent logging in JSON format.\n\n**Steps to Reproduce:**\n1. Set up ModSecurity with the following configurations:\n - Use `SecAuditLogRelevantStatus \"^(?:5|4)\"`.\n - Enable concurrent logging.\n2. Trigger 404 errors on the test server.\n3. Check the contents of `audit.log_0` and `audit.log_1`.\n\n**Expected Outcome:**\nVerify that both log index files include entries for 404 status codes as specified in the configuration.\n\n**Technical Details:**\n- Ensure ModSecurity v3.0.8 and nginx-connector v1.0.3 are properly integrated with nginx-1.22.0 on Ubuntu 20.04.\n- Review the rule set, specifically OWASP rules 3.0.2, to confirm they are correctly implemented and not filtering out relevant log entries.\n\n**First Steps to Address the Issue:**\n1. Review the configuration for `SecAuditLogRelevantStatus` to confirm it is correctly applied.\n2. Analyze the audit logging mechanism to ensure it processes and writes all relevant status codes.\n3. Test the logging functionality under various conditions to isolate the circumstances under which 404s are excluded.\n4. Update ModSecurity to the latest version if necessary to address any known bugs related to logging.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3038", @@ -62689,6 +64871,7 @@ "node_id": "I_kwDOABQmks59VkH2", "title": "Regex key non-matches with libModSecurity persistent storage", "body": "In ModSecurity v3, when using persistent storage of variables (i.e. 
lasting beyond a single transaction -- either with the in-memory option or the lmdb option), some regular expression matches for variable keys may not match as would reasonably be expected.\r\n\r\nE.g. Consider this set of rules:\r\n```\r\nSecAction \"initcol:ip='127.0.0.1',id:6000,pass,phase:1\"\r\nSecRule ARGS \"@rx .\" \"id:6101,phase:2,pass,setvar:ip.mycount1=1\"\r\nSecRule IP:'/mycount1/' \"@rx .\" \"id:6102,phase:2,pass,log,msg:'matched is %{MATCHED_VAR}'\"\r\nSecRule IP:'/^mycount1/' \"@rx .\" \"id:6103,phase:2,pass,log,msg:'matched is %{MATCHED_VAR}'\"\r\n```\r\n\r\nAnd then execute: ```curl http://localhost/testget.php?a=b```\r\n\r\nRule 6101 has created the variable 'mycount1' within the IP collection for 127.0.0.1. Its value can be accessed using the variable-key regex specification in rule 6102.\r\n\r\nRule 6103, however, will not match the variable key. In this case the regex pattern passed to pcre is ```127.0.0.1::::^mycount1```. This obviously will not yield the result one would ordinarily expect from looking at rule 6103.\r\n\r\nThis behaviour was not seen in ModSecurity v2.\r\n", + "summary": "Investigate the issue of regex key non-matches with libModSecurity persistent storage in ModSecurity v3. Confirm that regular expression matches for variable keys do not behave as expected when using persistent storage, either in-memory or lmdb, particularly in the context of the provided rules.\n\n1. Reproduce the issue using the specified rules:\n - Initialize the collection with `SecAction \"initcol:ip='127.0.0.1',id:6000,pass,phase:1\"`.\n - Execute a request with `curl http://localhost/testget.php?a=b` to trigger rules 6101, 6102, and 6103.\n\n2. Analyze the regex patterns:\n - Note that rule 6101 correctly creates the variable 'mycount1' in the IP collection for 127.0.0.1.\n - Examine the regex pattern used in rule 6103 (`/^mycount1/`) and compare it with the output observed from the PCRE matching process.\n\n3. Identify discrepancies:\n - Understand that the pattern `127.0.0.1::::^mycount1` is generated for the regex, leading to unexpected behavior due to the inclusion of the IP address and additional colons.\n\n4. Explore possible solutions:\n - Review the implementation of regex handling in ModSecurity v3 compared to v2, focusing on variable key matching.\n - Consider modifying the regex pattern to accommodate the persistent storage structure or adjust how variable keys are parsed in the new version.\n\n5. Document findings:\n - Report any identified bugs or unexpected behavior to the ModSecurity development team for further analysis and potential fixes. \n\nBy following these steps, you can effectively tackle the problem and contribute to resolving the regex key non-matches in ModSecurity v3.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3039", @@ -62704,8 +64887,8 @@ "repository": 1213, "assignees": [], "labels": [ - 399, - 400 + 400, + 399 ] } }, @@ -62718,6 +64901,7 @@ "node_id": "I_kwDOABQmks59anyM", "title": "Review all repositories under owasp-modsecurity org and retire the ones no longer needed", "body": "Trustwave Spiderlabs has the following repos with a connection to ModSecurity. 
4 of them have been transferred as of this writing (2024-01-28):\r\n\r\n* https://github.com/SpiderLabs/ModSecurity (has been transferred)\r\n* https://github.com/SpiderLabs/ModSecurity-nginx (has been transferred)\r\n* https://github.com/SpiderLabs/ModSecurity-apache (has been transferred)\r\n* https://github.com/SpiderLabs/modsec-sdbm-util\r\n* https://github.com/SpiderLabs/ModSecurity-Python-bindings\r\n* https://github.com/SpiderLabs/ModSecurity-log-utilities\r\n* https://github.com/SpiderLabs/secrules-language-tests (has been transferred)\r\n* https://github.com/SpiderLabs/modsecurity-mlogc-ng\r\n* https://github.com/SpiderLabs/secrules-language-evaluation\r\n* https://github.com/SpiderLabs/ModSecurity-pcap\r\n* https://github.com/SpiderLabs/ModSecurity-status\r\n\r\nSo there are 3 tasks ahead:\r\n\r\n- [ ] Get all the remaining repos\r\n- [ ] Evaluate all repos and document them\r\n- [ ] Retire the ones we no longer need", + "summary": "Review all repositories under the owasp-modsecurity organization and identify those that are no longer needed. Focus on the following remaining repositories associated with Trustwave Spiderlabs:\n\n1. https://github.com/SpiderLabs/modsec-sdbm-util\n2. https://github.com/SpiderLabs/ModSecurity-Python-bindings\n3. https://github.com/SpiderLabs/ModSecurity-log-utilities\n4. https://github.com/SpiderLabs/modsecurity-mlogc-ng\n5. https://github.com/SpiderLabs/secrules-language-evaluation\n6. https://github.com/SpiderLabs/ModSecurity-pcap\n7. https://github.com/SpiderLabs/ModSecurity-status\n\nExecute the following tasks:\n\n1. **Gather all remaining repositories**: Compile a comprehensive list of all repositories that still exist under the owasp-modsecurity org.\n2. **Evaluate and document each repository**: Analyze the purpose, usage, and relevance of each repository. Create documentation that outlines their functionality and current status.\n3. **Retire unnecessary repositories**: Based on the evaluation, mark the repositories that are obsolete or no longer required for active development or support.\n\nFirst steps to tackle this problem include:\n\n- Clone all remaining repositories locally for thorough inspection.\n- Review README files, issues, and pull requests in each repository to understand their current use and importance.\n- Collaborate with team members to gather insights on the necessity of each repository.\n- Create a decision matrix to facilitate the retirement process based on usage and relevance.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3044", @@ -62746,6 +64930,7 @@ "node_id": "I_kwDOABQmks59an4q", "title": "Review and update security.md", "body": "The security.md is very minimal and it also points to the Spiderlabs repo.\r\n\r\nThis has to be updated and expanded.", + "summary": "Update the security.md file to enhance its content. Expand on the current minimal information and ensure it no longer solely references the Spiderlabs repository. \n\n1. Review the existing security.md for current content and structure.\n2. Research best practices for security documentation in your specific context or framework.\n3. Identify and include critical security policies, guidelines, and procedures relevant to the project.\n4. Add sections on reporting vulnerabilities, handling security incidents, and regular security audits.\n5. Incorporate links to relevant security resources, tools, or libraries.\n6. 
Ensure clarity and conciseness for better understanding by users of varying technical backgrounds.\n7. Solicit feedback from team members or security experts to refine the document further. \n\nImplement these changes to enhance the overall security posture communicated through the documentation.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3045", @@ -62774,6 +64959,7 @@ "node_id": "I_kwDOABQmks59eNdN", "title": "Update Wikipedia:ModSecurity", "body": "https://en.wikipedia.org/wiki/ModSecurity", + "summary": "Update the Wikipedia page for ModSecurity. \n\n1. Review the current content for accuracy and completeness.\n2. Research recent developments in ModSecurity, including new features, updates, and community contributions since the last edit.\n3. Identify and gather reliable sources to support the updates, such as official documentation, release notes, and reputable articles.\n4. Edit the page to include information on recent enhancements, changes in usage, and integration with other tools.\n5. Ensure adherence to Wikipedia’s guidelines on verifiability and neutrality.\n6. Add references to the sources to maintain credibility.\n7. Consider updating sections on installation, configuration, and use cases to reflect the latest practices.\n8. Submit the changes for review and monitor for any feedback or additional edits from the community.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3046", @@ -62802,6 +64988,7 @@ "node_id": "I_kwDOABQmks59mmoi", "title": "Problem with logrotate: Log rotation writes to incorrect file", "body": "**Problem Description:**\r\n\r\nAfter configuring logrotate to handle ModSecurity log rotation, I encountered an unexpected issue. After the first rotation, logs are being written to the file modsec_audit.log.1 instead of modsec_audit.log. Additionally, all subsequent logs are appended to the modsec_audit.log.1 file, leading to potentially large log files.\r\n\r\n**Logrotate Configuration:**\r\n\r\n```\r\n/var/log/modsec_audit.log {\r\n daily\r\n rotate 14\r\n compress\r\n delaycompress\r\n missingok\r\n notifempty\r\n create 644 root root\r\n sharedscripts\r\n postrotate\r\n /usr/sbin/service nginx reopenlogs >/dev/null 2>&1 || true\r\n endscript\r\n}\r\n```\r\n\r\n*Steps to Reproduce the Issue:*\r\n\r\n1. ModSecurity configuration adhering to recommendations.\r\n2. Logrotate configuration added in /etc/logrotate.d/modsec.\r\n\r\n*Expected Behavior:*\r\n\r\nLogs should be correctly rotated into the modsec_audit.log file with the proper rotation suffix.\r\n\r\n*Observed Behavior:*\r\nLogs are written to the modsec_audit.log.1 file after the first rotation, and subsequent logs are appended to the modsec_audit.log.1 file.\r\n\r\n*Environment:*\r\n\r\nOperating System: linux ubuntu 20.04\r\nModSecurity Version: V3\r\nLogrotate Version: 3.14.0\r\n\r\nYour assistance in addressing this matter and providing guidance or a fix would be greatly appreciated. Thank you!", + "summary": "**Address the logrotate issue with ModSecurity log rotation.**\n\n1. **Review Logrotate Configuration:** Check the current configuration in `/etc/logrotate.d/modsec`. Ensure the syntax is correct and matches your intended log file management.\n\n2. **Modify Configuration:** Update the logrotate script to ensure that logs are written to the correct file. Consider adding the `olddir` directive to specify where rotated files should be stored. \n\n3. 
**Investigate Postrotate Script:** Examine the `postrotate` script. Ensure that the command `/usr/sbin/service nginx reopenlogs` is functioning correctly and that Nginx is properly reloading its log files after rotation.\n\n4. **Testing:** After making changes, manually trigger logrotate using the command:\n ```bash\n logrotate -f /etc/logrotate.conf\n ```\n This forces log rotation and allows you to observe if logs are correctly written to `modsec_audit.log`.\n\n5. **Monitor Log Files:** Observe the behavior of `modsec_audit.log` and `modsec_audit.log.1` after the test. Verify if logs are now being written to the correct file.\n\n6. **Check Permissions:** Ensure that the permissions and ownership of the log files are correctly set to allow logrotate to write to them without issues.\n\n7. **Consult Documentation:** Refer to the logrotate and ModSecurity documentation for any additional options or best practices that may affect log handling.\n\n8. **Seek Community Help:** If issues persist, consider posting the updated configuration and findings to relevant forums or GitHub for additional support.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3047", @@ -62830,6 +65017,7 @@ "node_id": "I_kwDOABQmks5-ZQQ0", "title": "libmodsecurity3: SecAction can't be disabled via ctl action", "body": "**Describe the bug**\r\n\r\nIn libmodsecurity3, SecAction can't be disabled via a ctl action like with SecRules. This issue isn't present in ModSecurity2.\r\n\r\n**Logs and dumps**\r\n\r\nN/A\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n1. Have a fresh Ubuntu 22.04 server setup, this bug was a bit tricky to reproduce when I was opening this issue.\r\n2. ModSecurity was tested and installed using [digitalwave's ModSecurity repository](https://modsecurity.digitalwave.hu/) and ``apt install nginx-extras libnginx-mod-http-modsecurity``\r\n3. I installed this version of [CRSv4 RC-2](https://github.com/coreruleset/coreruleset/commit/f987e297113545b53d2479a58553e520a3145947) and [ModSecurity.conf](https://github.com/owasp-modsecurity/ModSecurity/commit/622eb9e6c860ade1e2195d316dfbd86ce5710445) with the ``SecRuleEngine`` directive set to on, but it shouldn't matter what rulesets are used.\r\n4. Create this test conf file, but make sure to load it before any blocking rules (i.e ``Include /etc/nginx/modsecurity/coreruleset/rules/*.conf``)\r\n```\r\nSecRule REQUEST_HEADERS:Host \"!@streq example.com\" \"id:1,phase:1,pass,t:none,nolog,ctl:ruleRemoveById=2\"\r\n\r\nSecAction \\\r\n \"id:2,phase:1,pass,t:none,nolog,ctl:ruleRemoveByTag=OWASP_CRS\"\r\n```\r\n5. restart nginx then try to send an attack payload, rule 2 should be disabled yet it still disables OWASP_CRS ``curl 127.0.0.1?exec/bin/bash``\r\n6. 
If you modify the test file to this, then rule 2 is disabled and everything works as expected:\r\n```\r\nSecRule REQUEST_HEADERS:Host \"!@streq example.com\" \"id:1,phase:1,pass,t:none,nolog,ctl:ruleRemoveById=2\"\r\n\r\nSecRule REQUEST_FILENAME \"@unconditionalMatch\" \\\r\n \"id:2,phase:1,pass,t:none,nolog,ctl:ruleRemoveByTag=OWASP_CRS\"\r\n```\r\n**Expected behavior**\r\n\r\nSecActions rules should be disableable via a ctl action, just like SecRules.\r\n\r\n**Server:**\r\n - ModSecurity version (and connector): libmodsecurity 3.0.12 and ModSec-Nginx 1.0.3\r\n - WebServer: Nginx 1.18.0\r\n - OS: Ubuntu 22.04\r\n\r\n**Rule Set:**\r\n - [CRSv4 RC-2](https://github.com/coreruleset/coreruleset/commit/f987e297113545b53d2479a58553e520a3145947)\r\n\r\n**Additional context**\r\n\r\nThis issue currently affects some CRS plugins such as Nextcloud or WordPress, if you wish to use them in a reverse proxy and want to selectively enable/disable the plugins for certain domains.", + "summary": "Disable SecAction via ctl action in libmodsecurity3.\n\n**Background**: In libmodsecurity3, SecAction rules cannot be disabled using ctl actions, contrary to the behavior in ModSecurity2. This issue may impact specific applications like Nextcloud and WordPress when used as reverse proxies.\n\n**Steps to Reproduce**:\n1. Set up a fresh Ubuntu 22.04 server.\n2. Install ModSecurity using the repository from digitalwave with the command: \n ```bash\n apt install nginx-extras libnginx-mod-http-modsecurity\n ```\n3. Use CRS v4 RC-2 and the latest ModSecurity configuration with `SecRuleEngine on`.\n4. Create a configuration file, ensuring it loads before any blocking rules with:\n ```nginx\n SecRule REQUEST_HEADERS:Host \"!@streq example.com\" \"id:1,phase:1,pass,t:none,nolog,ctl:ruleRemoveById=2\"\n \n SecAction \\\n \"id:2,phase:1,pass,t:none,nolog,ctl:ruleRemoveByTag=OWASP_CRS\"\n ```\n5. Restart Nginx and test with:\n ```bash\n curl 127.0.0.1?exec/bin/bash\n ```\n Expect rule 2 to be disabled, but it still disables OWASP_CRS.\n6. Modify the test file to disable rule 2 successfully:\n ```nginx\n SecRule REQUEST_HEADERS:Host \"!@streq example.com\" \"id:1,phase:1,pass,t:none,nolog,ctl:ruleRemoveById=2\"\n \n SecRule REQUEST_FILENAME \"@unconditionalMatch\" \\\n \"id:2,phase:1,pass,t:none,nolog,ctl:ruleRemoveByTag=OWASP_CRS\"\n ```\n\n**Expected Outcome**: Enable disabling of SecAction rules via ctl actions as with SecRules.\n\n**Next Steps**:\n1. Investigate the implementation of SecAction handling in libmodsecurity3 and identify differences from ModSecurity2.\n2. Adjust the logic for processing ctl actions to include SecAction rules.\n3. Test the changes in a controlled environment to confirm the issue resolution.\n4. Document the changes and update any relevant tests or configurations for clarity.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3053", @@ -62845,8 +65033,8 @@ "repository": 1213, "assignees": [], "labels": [ - 399, - 400 + 400, + 399 ] } }, @@ -62859,6 +65047,7 @@ "node_id": "I_kwDOABQmks5-kDjq", "title": "Inadequate locking for multithreading in libModSecurity In-Memory persistent storage", "body": "This issue does not apply to the main, current use case of libModSecurity with nginx, since that is a multi-process but single-threaded application. 
This finding is based on code inspection only; I did not build a multi-threaded environment to prove a crash or other tangible errors.\r\n\r\nWhile adding the expirevar support I noticed that, while the in-memory option includes some internal locking, it does so only for updates.\r\n\r\nThe problem is that if ThreadA is performing a read-only action using an interator and, while that is ongoing, ThreadB makes an update to the data, it could invalidate the iterator that ThreadA is in the midst of using. (Note that the update by ThreadB might be a deletion, but an insertion could also invalidate the iterator that is in-use by ThreadA.)\r\n", + "summary": "Investigate and address inadequate locking mechanisms for multithreading in libModSecurity's In-Memory persistent storage. \n\nRecognize that the current implementation primarily supports multi-process, single-threaded applications (e.g., nginx), which does not expose the issue directly. Note that the problem arises during concurrent read and write operations, particularly when ThreadA reads data using an iterator while ThreadB simultaneously updates that data.\n\nIdentify that the existing locking only safeguards update operations, leaving read operations vulnerable to iterator invalidation. This can occur if ThreadB deletes or inserts data while ThreadA is iterating.\n\nTo tackle this problem, consider the following first steps:\n1. Conduct a thorough code review to pinpoint where locking mechanisms are implemented and where they are lacking for read operations.\n2. Implement mutexes or read-write locks around read operations to ensure that iterators remain valid during concurrent updates.\n3. Test the modified locking strategy in a controlled multithreaded environment to verify that the solution prevents iterator invalidation.\n4. Document the changes and their implications for performance and concurrency within libModSecurity.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3054", @@ -62885,6 +65074,7 @@ "node_id": "I_kwDOABQmks5-5K4c", "title": "Different behavior of the engines implicit `@rx` operator", "body": "This is not a bug report, but more of a discussion thread.\r\n\r\nAs you know, ModSecurity allows to create a `SecRule` without operator. In this case the `@rx` operator will be used. Here are the documented behaviors:\r\n\r\n[v2](https://github.com/owasp-modsecurity/ModSecurity/wiki/Reference-Manual-%28v2.x%29#rx): \"**_This is the default operator; the rules that do not explicitly specify an operator default to @rx._**\"\r\n\r\n[v3](https://github.com/owasp-modsecurity/ModSecurity/wiki/Reference-Manual-%28v3.x%29#user-content-rx): \"**_This is the default operator; the rules that do not explicitly specify an operator default to @rx._**\"\r\n\r\nLet's take a step back and see the next cases.\r\n\r\nCreate a rule which checks a regular expression:\r\n```\r\nSecRule ... \"@rx attack\" \"...\"\r\n```\r\nor we can check the exact match:\r\n```\r\nSecRule ... \"@strEq attack\" \"...\"\r\n```\r\n\r\nThe argument can be anything - for example it can be a domain part of the e-mail address:\r\n```\r\nSecRule ... \"@rx @modsecurity.org\" \"...\"\r\nSecRule ... \"@contains @modsecurity.org\" \"...\"\r\n```\r\n\r\nAnd now we can leave the operator:\r\n```\r\nSecRule ... \"@modsecurity.org\" \"...\"\r\n```\r\n\r\nNow what happens? 
It depends on what version of ModSecurity you use.\r\n\r\nIn case of v2, you will get an error message:\r\n```\r\nError creating rule: Failed to resolve operator: modsecurity.org\r\n```\r\n\r\nIn case of v3, there won't be any error message, the engine will create a `SecRule` (what you **won't** see):\r\n```\r\nSecRule ... \"@rx @modsecurity.org\" ...\r\n```\r\nNow imagine what happens if someone makes a typo in the operator's name...\r\n\r\n**Expected behavior**\r\n\r\nThe aim of this issue is to discuss about what is the expected behavior in this case.\r\n\r\nI think using of implicit configuration at any place is a very bad idea. I would leave it at all, I mean I would completely eliminate it.\r\n", + "summary": "Discuss the different behaviors of the implicit `@rx` operator in ModSecurity v2 and v3. Identify that the absence of an explicitly defined operator defaults to `@rx`, as documented for both versions.\n\n1. **Analyze Rule Creation**:\n - Create rules using explicit operators such as `@rx` and `@strEq`.\n - Use sample rules to illustrate the point:\n ``` \n SecRule ... \"@rx attack\" \"...\"\n SecRule ... \"@strEq attack\" \"...\"\n ```\n\n2. **Examine Implicit Operator Behavior**:\n - Introduce rules without an operator:\n ```\n SecRule ... \"@modsecurity.org\" \"...\"\n ```\n - Note the difference in behavior:\n - In v2, this results in an error: \n ```\n Error creating rule: Failed to resolve operator: modsecurity.org\n ```\n - In v3, it silently defaults to using `@rx`:\n ```\n SecRule ... \"@rx @modsecurity.org\" \"...\"\n ```\n\n3. **Identify Potential Issues**:\n - Discuss implications of typos in operator names leading to unintended behavior.\n - Outline the risks of silent failures in rule creation, particularly in v3.\n\n4. **Propose Solutions**:\n - Suggest eliminating implicit operator configurations to prevent confusion and errors.\n - Consider making the requirement for explicit operator definitions mandatory in both versions.\n\n5. **Next Steps**:\n - Initiate a discussion among contributors regarding the proposed changes.\n - Collect feedback on the impact of implicit behavior on user experience and rule management.\n - Document findings and recommendations for potential updates to ModSecurity guidelines.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3081", @@ -62911,6 +65101,7 @@ "node_id": "I_kwDOABQmks5_Nu7J", "title": "No CPE for 3.0.11 and 3.0.12", "body": "I am the package maintainer of ModSecurity in Buildroot. Buildroot has automated tracking of CVEs which it does by checking the CPE for the corresponding release. It seems that for both 3.0.11 and 3.0.12 no CPE was registered. The newest CPE I can find in the NIST database is [cpe:2.3:a:trustwave:modsecurity:3.0.10:*:*:*:*:*:*:*](https://nvd.nist.gov/products/cpe/detail/666EBCC2-D5F2-4C26-B2F0-172234B20624?namingFormat=2.3&orderBy=CPEURI&keyword=modsecurity&status=FINAL\r\n)\r\nThis has effectively broken the CVE reporting infrastructure for ModSecurity in Buildroot, causing us to miss [CVE-2024-1019](https://nvd.nist.gov/vuln/detail/CVE-2024-1019).\r\n\r\n\r\nWill the creation of CPEs resume in the future for future versions or will this be deprecated?", + "summary": "Register CPEs for ModSecurity versions 3.0.11 and 3.0.12. Investigate the absence of CPE entries in the NIST database, which currently only lists CPE for version 3.0.10. 
Address the impact on CVE reporting in Buildroot, as this omission has led to missed vulnerabilities like CVE-2024-1019.\n\nFirst steps to tackle the problem:\n\n1. Confirm the CPE registration process for ModSecurity with Trustwave.\n2. Contact NIST to understand the status of CPE registration for recent ModSecurity releases.\n3. Gather information on how the lack of CPEs affects the CVE tracking system in Buildroot.\n4. Explore alternatives for tracking CVEs until CPEs are updated.\n5. Monitor future version releases for CPE registration status and ensure timely updates.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3083", @@ -62940,6 +65131,7 @@ "node_id": "I_kwDOABQmks5_Vxq8", "title": "Problem with logfile's name", "body": "I have last version Modsecurity (but with previous version was same problem). In configfile /etc/nginx/modsec/modsecurity.conf i have:\r\nSecAuditLogType Serial\r\nSecAuditLog /var/log/modsec/modsec_audit.log\r\nThis file is created, but he have zero size and modsec contineuos write in file in format: modsec_audit.log-YYYYMMDD\r\n\r\nPlease help me. What could be the problem? How fix it?", + "summary": "Investigate the issue with the ModSecurity log file not populating correctly. The configuration in `/etc/nginx/modsec/modsecurity.conf` specifies:\n\n- Use `SecAuditLogType Serial` for logging.\n- Output log to `/var/log/modsec/modsec_audit.log`.\n\nConfirm that the log file is created but remains zero in size and that ModSecurity is writing to rotated files in the format: `modsec_audit.log-YYYYMMDD`.\n\nTo tackle the problem:\n\n1. Check file permissions of `/var/log/modsec/` and ensure that the Nginx user has write access to `modsec_audit.log`.\n2. Review the ModSecurity logs for any error messages that might indicate why logging fails.\n3. Validate that ModSecurity is properly installed and configured in Nginx.\n4. Ensure that the Nginx service has been restarted after making any configuration changes.\n5. Test the logging by triggering ModSecurity rules and observe if entries are logged.\n6. If issues persist, consider changing the log type to `Concurrent` temporarily to see if it affects logging behavior.\n7. Review documentation for any specific requirements or known issues related to the version of ModSecurity in use.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3084", @@ -62968,6 +65160,7 @@ "node_id": "I_kwDOABQmks5_7_Va", "title": "SanitiseArg does not work in RequestBody", "body": "**SanitiseArg does not work in RequestBody**\r\nThis time without messed up markdown :)\r\n\r\nTaken right from the docs: https://github.com/owasp-modsecurity/ModSecurity/wiki/Reference-Manual-(v2.x)#user-content-sanitiseArg\r\n\r\nI want to sanitiese two password fields in a POST body, but the Rule is not working. I am on Ubuntu 22.04.03 LTS for testing and Apache 2.4.52\r\n\r\nI have defined five rules (for each phase for testing, although only phase 2 should be relevant) in my custom rules:. see the waf_adaption attachment:\r\n`SecAction \"auditlog,phase:2,id:131,sanitiseArg:password1,sanitiseArg:password2\"\r\n`\r\n**Logs and dumps**\r\n\r\nOutput of:\r\n 1. DebugLogs (level 9)\r\n 2. AuditLogs\r\n 3. Error logs: is empty\r\n 4. 
If there is a crash, the core dump file: n crash\r\n\r\n\r\nSee the attached files, also the modsecurity [configuration](url)\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the behavior:\r\n\r\n`curl 'http://localhost/test' -X POST -H 'User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:120.0) Gecko/20100101 Firefox/120.0' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8' -H 'Accept-Language: en-US,en;q=0.5' -H 'Accept-Encoding: gzip, deflate, br' -H 'Connection: keep-alive' -H 'Upgrade-Insecure-Requests: 1' -H 'Sec-Fetch-Dest: document' -H 'Sec-Fetch-Mode: navigate' -H 'Sec-Fetch-Site: same-origin' -H 'Sec-Fetch-User: ?1' -H 'Origin: http://localhost' -H 'Pragma: no-cache' -H 'Cache-Control: no-cache' --data-raw $'password1=xyz&password2=test&inj=1\\' or 1=1;--'`\r\n\r\n\r\n**Expected behavior**\r\n\r\nI would expect that password1 and password2 are sanitised in the audit log, when appearing in the request body.\r\n\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): libapache2-mod-security2 2.9.5-1 \r\n - WebServer: Apache 2.4.52\r\n - OS (and distro):Ubuntu 22.04.3 LTS\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? OWASP CRS 3.3.0\r\n - What is the version number? 3.3.0\r\n\r\n**I added the configuration and log files as an attachmen**\r\n\r\nAdd any other context about the problem here.\r\n[modsec_debug.log](https://github.com/owasp-modsecurity/ModSecurity/files/14358181/modsec_debug.log)\r\n[modsec_audit.log](https://github.com/owasp-modsecurity/ModSecurity/files/14358189/modsec_audit.log)\r\n[modsecurity.txt](https://github.com/owasp-modsecurity/ModSecurity/files/14358197/modsecurity.txt)\r\n[security2.txt](https://github.com/owasp-modsecurity/ModSecurity/files/14358198/security2.txt)\r\n[waf_adaption.txt](https://github.com/owasp-modsecurity/ModSecurity/files/14358289/waf_adaption.txt)\r\n\r\n", + "summary": "Sanitize argument handling in the ModSecurity RequestBody fails. \n\n1. **Review Documentation**: Verify the usage of `sanitiseArg` as specified in the ModSecurity documentation. Ensure that syntax and parameters are correct.\n\n2. **Check Rule Implementation**: Confirm that the rule is properly defined in your custom rules file. Ensure that the rule is correctly applied in phase 2, as it should capture the POST body.\n\n Example rule:\n ```\n SecAction \"auditlog,phase:2,id:131,sanitiseArg:password1,sanitiseArg:password2\"\n ```\n \n3. **Inspect Debug and Audit Logs**: Analyze the debug logs (set to level 9) and audit logs to identify any issues or errors related to the sanitization process. Look for relevant entries that might indicate why sanitization is not occurring.\n\n4. **Test with cURL**: Use the provided cURL command to reproduce the issue consistently. Ensure that the command is formatted correctly and that the POST request is sent properly.\n \n Example cURL command:\n ```\n curl 'http://localhost/test' -X POST -d 'password1=xyz&password2=test&inj=1\\' or 1=1;--'\n ```\n\n5. **Validate Server Configuration**: Check the ModSecurity version and Apache configuration to ensure compatibility. You're using:\n - ModSecurity version: libapache2-mod-security2 2.9.5-1\n - Apache version: 2.4.52\n - OS: Ubuntu 22.04.3 LTS\n\n6. **Review Rule Set**: Confirm that the OWASP CRS 3.3.0 is correctly installed and that there are no conflicting rules that might override or disable the sanitization.\n\n7. 
**Use Minimal Configuration**: Temporarily simplify the ModSecurity configuration to isolate the issue. Disable other rules and only run the sanitization rule to see if it behaves correctly.\n\n8. **Consult Community**: If the issue persists, consider reaching out to the ModSecurity community for insights or similar experiences.\n\nBy following these steps, identify the cause of the sanitization failure in the request body and implement a suitable solution.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3089", @@ -62996,6 +65189,7 @@ "node_id": "I_kwDOABQmks6A_m39", "title": "SecAuditLogFormat set to JSON prints logs in native format aswell", "body": "**Describe the bug**\r\n\r\nUnable to get json logs for Modsecurity in K8s ingress-nginx even after setting `SecAuditLogFormat: JSON`. \r\n\r\nI am setting `SecAuditLogFormat: JSON` and I want that the logs should be printed in json format only in ingress controller pod. Is it possible to remove/disable the logs in non json format from modsecurity if `SecAuditLogFormat: JSON`.\r\n\r\n\r\n**Logs and dumps**\r\n\r\n**Current behaviour** - If you see in below logs, line number 1 & 2 are not in json format and we do not need these type of logs if `SecAuditLogFormat: JSON` .\r\n\r\n```bash\r\n(1) 2024/03/01 20:24:46 [error] 159#159: *2640 [client 111.111.11.11] ModSecurity: Access denied with code 403 (phase 2). Matched \"Operator `Rx' with parameter `.*[Cc]lass\\..*' against variable `FULL_REQUEST' (Value: `host: example.com\\x0auser-agent: curl/7.81.0\\x0aaccept: */*\\x0ax-test: Class.devi (22 characters omitted)' ) [file \"/etc/nginx/owasp-modsecurity-crs/custom-modsec-config/custom-rules.conf\"] [line \"1\"] [id \"1\"] [rev \"\"] [msg \"spring4shell Class detected\"] [data \"\"] [severity \"0\"] [ver \"\"] [maturity \"0\"] [accuracy \"0\"] [hostname \"11.1.1.111\"] [uri \"/\"] [unique_id \"170932468636.278391\"] [ref \"o0,104v118,104\"], client: 111.111.11.11, server: example.com, request: \"HEAD / HTTP/2.0\", host: \"example.com\"\r\n\r\n(2) 2024/03/01 20:24:46 [error] 159#159: *2640 [client 111.111.11.11] ModSecurity: Access denied with code 403 (phase 2). 
Matched \"Operator `Rx' with parameter `.*[Cc]lass\\..*' against variable `FULL_REQUEST' (Value: `host: example.com\\x0auser-agent: curl/7.81.0\\x0aaccept: */*\\x0ax-test: Class.devi (22 characters omitted)' ) [file \"/etc/nginx/owasp-modsecurity-crs/custom-modsec-config/custom-rules.conf\"] [line \"1\"] [id \"1\"] [rev \"\"] [msg \"spring4shell Class detected\"] [data \"\"] [severity \"0\"] [ver \"\"] [maturity \"0\"] [accuracy \"0\"] [hostname \"11.1.1.111\"] [uri \"/\"] [unique_id \"170932468683.123247\"] [ref \"o0,104v118,104\"], client: 111.111.11.11, server: example.com, request: \"HEAD / HTTP/2.0\", host: \"example.com\"\r\n\r\n(3) { \"time_local\": \"01/Mar/2024:20:24:46 +0000\", \"remote_addr\": \"111.111.11.11\", \"remote_user\": \"\",\"request\": \"HEAD / HTTP/2.0\", \"status\": \"403\", \"body_bytes_sent\": \"0\", \"request_time\": \"0.003\", \"http_referrer\": \"\", \"http_user_agent\": \"curl/7.81.0\", \"http_x_forwarded_for\": \"111.111.11.11\", \"http_session\": \"\", \"upstream_response_time\": \"0.003\", \"method\": \"HEAD\", \"request_uri\": \"/\", \"host\": \"example.com\", \"x-b3-traceid\": \"01aekjkjh122118f60\" }\r\n\r\n(4) {\"transaction\":{\"client_ip\":\"111.111.11.11\",\"time_stamp\":\"Fri Mar 1 20:24:46 2024\",\"server_id\":\"a0490583sasasa879asas98asee8c49fe184b3\",\"client_port\":15943,\"host_ip\":\"11.1.1.111\",\"host_port\":443,\"unique_id\":\"170932468683.123247\",\"request\":{\"method\":\"HEAD\",\"http_version\":2.0,\"uri\":\"/\",\"body\":\"\",\"headers\":{\"host\":\"example.com\",\"user-agent\":\"curl/7.81.0\",\"accept\":\"*/*\",\"x-test\":\"Class.devil.java.com\"}},\"response\":{\"http_code\":403,\"headers\":{\"Server\":\"\",\"Server\":\"\",\"Date\":\"Fri, 01 Mar 2024 20:24:46 GMT\",\"Content-Type\":\"text/html\",\"X-Request-Id\":\"\",\"Connection\":\"close\",\"X-XSS-Protection\":\"0\",\"Vary\":\"Accept-Encoding\",\"Content-Security-Policy\":\"frame-src 'self'; frame-ancestors 'self'; object-src 'self';\",\"Referrer-Policy\":\"strict-origin-when-cross-origin\"}},\"producer\":{\"modsecurity\":\"ModSecurity v3.0.8 (Linux)\",\"connector\":\"ModSecurity-nginx v1.0.3\",\"secrules_engine\":\"Enabled\",\"components\":[\"OWASP_CRS/3.3.5\\\"\"]},\"messages\":[{\"message\":\"spring4shell Class detected\",\"details\":{\"match\":\"Matched \\\"Operator `Rx' with parameter `.*[Cc]lass\\\\..*' against variable `FULL_REQUEST' (Value: `host: example.com\\\\x0auser-agent: curl/7.81.0\\\\x0aaccept: */*\\\\x0ax-test: Class.devi (22 characters omitted)' )\",\"reference\":\"o0,104v118,104\",\"ruleId\":\"1\",\"file\":\"/etc/nginx/owasp-modsecurity-crs/custom-modsec-config/custom-rules.conf\",\"lineNumber\":\"1\",\"data\":\"\",\"severity\":\"0\",\"ver\":\"\",\"rev\":\"\",\"tags\":[],\"maturity\":\"0\",\"accuracy\":\"0\"}}]}}\r\n```\r\n\r\n**To Reproduce**\r\n\r\nEnable modsecurity with ingress-nginx in k8s cluster . 
Use below configmap and the server configuration as defined in below server section\r\n\r\nModsecurity ConfigMap:\r\n```yaml\r\napiVersion: v1\r\ndata:\r\n custom-rules.conf: |-\r\n SecRule FULL_REQUEST \"@rx .*[Cc]lass\\..*\" \"id:1,phase:2,deny,status:403,msg:spring4shell Class detected,ctl:ruleEngine=On\r\n SecRule FULL_REQUEST \"@rx .*tomcatwar\\.jsp.*\" \"id:2,phase:2,deny,status:403,msg:spring4shell tomcatwar detected,ctl:ruleEngine=On\r\n SecRule REMOTE_ADDR \"@ipMatch 127.0.0.1\" \"id:3,phase:1,pass,nolog,ctl:ruleEngine=Off\r\n SecRule FULL_REQUEST \"@rx .*java\\.io\\.InputStream.*\" \"id:4,phase:2,deny,status:403,msg:spring4shell InputStream detected,ctl:ruleEngine=On\r\n nginx-modsecurity.conf: |-\r\n SecRuleEngine DetectionOnly\r\n SecAuditLog /dev/stdout\r\n SecAuditEngine RelevantOnly\r\n SecAuditLogFormat JSON\r\n SecAuditLogRelevantStatus \"^(?:4(?!04))\r\n SecAction \"id:900260,phase:1,nolog,pass,t:none,setvar:'tx.static_extensions=/.jpg/ /.jpeg/ /.png/ /.gif/ /.js/ /.css/ /.ico/ /.svg/ /.webp/'\r\n SecAction \"id:900700,phase:1,nolog,pass,t:none,setvar:'tx.dos_burst_time_slice=60',setvar:'tx.dos_counter_threshold=10000',setvar:'tx.dos_block_timeout=600'\r\n Include /etc/nginx/owasp-modsecurity-crs/crs-setup.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/custom-modsec-config/custom-rules.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-901-INITIALIZATION.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-905-COMMON-EXCEPTIONS.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-912-DOS-PROTECTION.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-913-SCANNER-DETECTION.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-921-PROTOCOL-ATTACK.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-930-APPLICATION-ATTACK-LFI.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-931-APPLICATION-ATTACK-RFI.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-933-APPLICATION-ATTACK-PHP.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-934-APPLICATION-ATTACK-NODEJS.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-944-APPLICATION-ATTACK-JAVA.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-950-DATA-LEAKAGES.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-951-DATA-LEAKAGES-SQL.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-952-DATA-LEAKAGES-JAVA.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-953-DATA-LEAKAGES-PHP.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/REQUEST-949-BLOCKING-EVALUATION.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-959-BLOCKING-EVALUATION.conf\r\n Include /etc/nginx/owasp-modsecurity-crs/rules/RESPONSE-980-CORRELATION.conf\r\nkind: ConfigMap\r\nmetadata:\r\n name: modsecurity-config\r\n namespace: demo\r\n```\r\n\r\nA **curl** command line that mimics the original request and reproduces the problem. 
Or a ModSecurity v3 test case.\r\n\r\n```\r\ncurl -I -H \"X-test: Class.devil.java.com\" \"https://example.com\"\r\n```\r\n\r\n\r\n**Expected behavior**\r\n\r\n Want only json logs to be printed as line number 2 in below logs and should not print line number 1 & 2 as in above logs, when `SecAuditLogFormat: JSON`. \r\n\r\n```bash\r\n(1) { \"time_local\": \"01/Mar/2024:20:24:46 +0000\", \"remote_addr\": \"111.111.11.11\", \"remote_user\": \"\",\"request\": \"HEAD / HTTP/2.0\", \"status\": \"403\", \"body_bytes_sent\": \"0\", \"request_time\": \"0.003\", \"http_referrer\": \"\", \"http_user_agent\": \"curl/7.81.0\", \"http_x_forwarded_for\": \"111.111.11.11\", \"http_session\": \"\", \"upstream_response_time\": \"0.003\", \"method\": \"HEAD\", \"request_uri\": \"/\", \"host\": \"example.com\", \"x-b3-traceid\": \"01aekjkjh122118f60\" }\r\n\r\n(2) {\"transaction\":{\"client_ip\":\"111.111.11.11\",\"time_stamp\":\"Fri Mar 1 20:24:46 2024\",\"server_id\":\"a0490583sasasa879asas98asee8c49fe184b3\",\"client_port\":15943,\"host_ip\":\"11.1.1.111\",\"host_port\":443,\"unique_id\":\"170932468683.123247\",\"request\":{\"method\":\"HEAD\",\"http_version\":2.0,\"uri\":\"/\",\"body\":\"\",\"headers\":{\"host\":\"example.com\",\"user-agent\":\"curl/7.81.0\",\"accept\":\"*/*\",\"x-test\":\"Class.devil.java.com\"}},\"response\":{\"http_code\":403,\"headers\":{\"Server\":\"\",\"Server\":\"\",\"Date\":\"Fri, 01 Mar 2024 20:24:46 GMT\",\"Content-Type\":\"text/html\",\"X-Request-Id\":\"\",\"Connection\":\"close\",\"X-XSS-Protection\":\"0\",\"Vary\":\"Accept-Encoding\",\"Content-Security-Policy\":\"frame-src 'self'; frame-ancestors 'self'; object-src 'self';\",\"Referrer-Policy\":\"strict-origin-when-cross-origin\"}},\"producer\":{\"modsecurity\":\"ModSecurity v3.0.8 (Linux)\",\"connector\":\"ModSecurity-nginx v1.0.3\",\"secrules_engine\":\"Enabled\",\"components\":[\"OWASP_CRS/3.3.5\\\"\"]},\"messages\":[{\"message\":\"spring4shell Class detected\",\"details\":{\"match\":\"Matched \\\"Operator `Rx' with parameter `.*[Cc]lass\\\\..*' against variable `FULL_REQUEST' (Value: `host: example.com\\\\x0auser-agent: curl/7.81.0\\\\x0aaccept: */*\\\\x0ax-test: Class.devi (22 characters omitted)' )\",\"reference\":\"o0,104v118,104\",\"ruleId\":\"1\",\"file\":\"/etc/nginx/owasp-modsecurity-crs/custom-modsec-config/custom-rules.conf\",\"lineNumber\":\"1\",\"data\":\"\",\"severity\":\"0\",\"ver\":\"\",\"rev\":\"\",\"tags\":[],\"maturity\":\"0\",\"accuracy\":\"0\"}}]}}\r\n```\r\n\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): ModSecurity v3.0.8 with nginx-connector v1.0.3\r\n - WebServer: ingress-nginx-v1.9.5\r\n - OS (and distro): Linux\r\n\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? SpiderLabs commercial rules\r\n - What is the version number? OWASP_CRS/3.3.5\r\n \r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n", + "summary": "**Address the Bug with ModSecurity JSON Logging in K8s NGINX Ingress**\n\n1. Identify the issue: `SecAuditLogFormat: JSON` is set, but logs are still being printed in a non-JSON format. Ensure that your goal is to capture only JSON logs in the ingress controller pod.\n\n2. 
Review the current configuration:\n - Check the `Modsecurity ConfigMap` to verify the following settings:\n ```yaml\n SecAuditLog /dev/stdout\n SecAuditEngine RelevantOnly\n SecAuditLogFormat JSON\n SecAuditLogRelevantStatus \"^(?:4(?!04))\"\n ```\n - Confirm that these configurations are correctly applied to the NGINX ingress controller.\n\n3. Investigate the log generation:\n - Analyze the logs to determine why non-JSON entries are still present. The entries (1) and (2) indicate that ModSecurity is generating error logs in a standard format despite JSON settings.\n\n4. Test potential fixes:\n - Check if there are any other configurations or directives overriding the `SecAuditLogFormat` setting. Look into the NGINX configuration files and ensure that logging directives do not conflict with your desired format.\n - Consider adding `SecAuditLog /dev/stdout` within the appropriate context to ensure it captures JSON logs only.\n\n5. Modify or update the ModSecurity rules:\n - Review and adjust rule settings to ensure they align with the JSON logging output. Investigate if certain rules or logging behaviors might be generating the non-JSON logs.\n - Confirm that you're utilizing the latest versions of ModSecurity and its connector to avoid any known issues.\n\n6. Test the setup:\n - After applying changes, use `curl` to simulate requests:\n ```bash\n curl -I -H \"X-test: Class.devil.java.com\" \"https://example.com\"\n ```\n - Verify that only JSON-formatted logs appear after executing the test.\n\n7. Document findings and configurations:\n - Keep detailed notes on configuration changes, testing results, and any further adjustments made to help with future troubleshooting.\n\nBy following these steps, focus on adjusting configurations and testing thoroughly to ensure only JSON logs are generated as intended.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3100", @@ -63024,6 +65218,7 @@ "node_id": "I_kwDOABQmks6BS6_x", "title": "Debian package dependencies are broken", "body": "Hi @airween,\r\n\r\nI'm trying to install modsecurity3 latest for nginx from sid.\r\n\r\nApparently there is a minor fix in 3.0.12 leading to a version 3.0.12-1.1 .\r\nHowever this fix has been added only to libmodsecurity3-dev and not libmodsecurity3.\r\n\r\nSo I get an error on installing\r\n\r\n```\r\n > [linux/amd64 stage-0 9/35] RUN set -x && apt-get update && apt-get -t sid install -y --no-install-recommends libmodsecurity-dev=3.0.12-1.1 libmodsecurity3=3.0.12-1:\r\n6.700 Reading state information...\r\n6.939 Some packages could not be installed. This may mean that you have\r\n6.939 requested an impossible situation or if you are using the unstable\r\n6.939 distribution that some required packages have not yet been created\r\n6.939 or been moved out of Incoming.\r\n6.939 The following information may help to resolve the situation:\r\n6.939 \r\n6.940 The following packages have unmet dependencies:\r\n7.205 libmodsecurity3t64 : Breaks: libmodsecurity3 (< 3.0.12-1.1)\r\n7.211 E: Unable to correct problems, you have held broken packages.\r\n------\r\n```\r\n_IIRC 3.0.12-1.1 was available for some time, but has been removed. Could that be?_\r\n\r\nFor the time being I have removed libmodsecurity-dev and changed my Dockerfile to work around this.\r\n\r\nCan you please have a look?\r\n\r\nPeter\r\n\r\n", + "summary": "Investigate broken Debian package dependencies for modsecurity3 installation on nginx. 
Identify the issue stemming from the missing update in libmodsecurity3, which should match version 3.0.12-1.1, currently available only in libmodsecurity3-dev.\n\n1. Verify the availability of libmodsecurity3 version 3.0.12-1.1 in the Debian sid repository.\n2. Check if the package has been removed or if it exists in a different repository.\n3. Review the output of `apt-get` to confirm the unmet dependencies and broken packages.\n4. Consider adding the `-t sid` option properly to ensure you are targeting the correct repository.\n\nPossible first steps:\n- Run `apt-cache policy libmodsecurity3` to see the available versions.\n- Update the Dockerfile to reflect any necessary changes based on findings.\n- If 3.0.12-1.1 is indeed missing, attempt to pin the package version or explore alternative installation methods (such as building from source).", "state": "open", "state_reason": "reopened", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3102", @@ -63052,6 +65247,7 @@ "node_id": "I_kwDOABQmks6Bbp0L", "title": "NULL pointer checks & compiler warnings", "body": "I'm rewriting my PR for NULL pointer checks.\r\nIn most places, it's as basic as adding \"if (ptr != NULL)\" but I tried to optimize it a bit.\r\nThere are some cases where a pointer cannot be NULL; it just doesn't make sense. Such an example is the \"msc\" variable used at a lot of place. It's initialized to non-NULL and checked, so it cannot be NULL (except in msc_tee.c, but that's another discussion). So, there's no need to check it in every single function. However, we'll get (depending on the compiler) warnings about null pointer dereference. Also, when doing a code review, it's not clear if it can never be null or if the developers forgot to check it.\r\nI found an almost neat solution: add \"assert(msc != NULL)\" in every function. It clarifies the code and suppresses the compiler warning ... at least in debug mode because assert() is defined to nothing in NDEBUG mode so the check is not executed - good from a performance point of view) and the compiler doesn't see that we check for nulliness.\r\nDoes anybody see a variation that would avoid the compiler warning without adding a real check at run-time?", + "summary": "Implement NULL pointer checks and suppress compiler warnings effectively. Focus on optimizing checks where certain pointers, like the \"msc\" variable, cannot be NULL due to prior initialization. \n\n1. Identify all instances where the \"msc\" variable is used and confirm its non-NULL status.\n2. Instead of adding redundant \"if (ptr != NULL)\" checks in every function, introduce the use of \"assert(msc != NULL)\" to clarify intent and silence compiler warnings in debug mode.\n3. Investigate alternative methods to ensure that the compiler recognizes the non-null nature of pointers without incurring runtime checks. \n\nConsider using compiler-specific attributes or annotations that can inform the compiler about pointer guarantees, such as `__attribute__((nonnull))` in GCC. This approach might help avoid warnings without impacting performance in production builds.\n\nProceed by implementing the assertions in a few critical functions and test to see if they effectively suppress warnings while maintaining code clarity. 
Then, expand the implementation to all relevant areas.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3105", @@ -63083,6 +65279,7 @@ "node_id": "I_kwDOABQmks6CiD3N", "title": "SecRuleScript actions always considered disruptive", "body": "The following block triggers an error in v3 (nginx):\r\n```\r\nSecRule REQUEST_FILENAME \"@unconditionalMatch\" \\\r\n \"id:888888,\\\r\n phase:1\\\r\n chain\"\r\n SecRuleScript test.lua \"nolog\"\r\n```\r\n\r\nThe error is:\r\n```\r\nnginx: [emerg] \"modsecurity_rules_file\" directive Disruptive actions can only be specified by chain starter rules. in /usr/local/nginx/conf/conf.d/npm.conf:13\r\nnginx: configuration file /usr/local/nginx/conf/nginx.conf test failed\r\n```\r\n\r\n**To Reproduce**\r\n\r\n1. Lua must be enabled for ModSecurity\r\n2. Use the rule block above\r\n3. Create a Lua script file like the following, named \"test.lua\":\r\n ```lua\r\n function main()\r\n return nil;\r\n end\r\n ```\r\n4. Put the Lua script into the same directory as the rule file\r\n5. Start / reload nginx\r\n\r\n**Expected behavior**\r\n\r\n`nolog` is not a disruptive action in the source code and should not trigger the error. The reason we even noticed this behaviour is, that the documentation says that actions are optional for `SecRuleScript` but the parser cannot cope with such a rule and will throw an error while parsing _the next_ rule, because it thinks that the end of the last rule wasn't reached.\r\n\r\nFull discussion: https://github.com/coreruleset/body-decompress-plugin/issues/4.\r\n\r\n**Server**\r\n - ModSecurity version (and connector): v3.0.12; connector v1.0.3\r\n - WebServer: nginx 1.25.3\r\n - OS (and distro): Debian Linux\r\n\r\n** Additional context**\r\n\r\nThe actions list of `SecRuleScript` is currently mandatory, even though the documentation says it isn't. However, the parser throws an error while parsing the _next_ rule, instead of complaining about the missing actions list. This is very confusing to users.", + "summary": "Address the issue of `SecRuleScript` actions being incorrectly treated as disruptive in ModSecurity v3 with nginx. \n\n1. **Identify the Problem**: Analyze the error encountered when using the rule block with `SecRuleScript`. The error indicates that disruptive actions can only be specified by chain starter rules, which implies a misinterpretation of the `nolog` action's status.\n\n2. **Reproduce the Issue**:\n - Ensure Lua is enabled for ModSecurity.\n - Implement the provided rule block in your configuration.\n - Create a simple Lua script named `test.lua` that returns nil.\n - Place the Lua script in the same directory as the rule file.\n - Start or reload nginx to trigger the error.\n\n3. **Investigate the Documentation**: Examine the existing documentation for `SecRuleScript` to determine whether it accurately reflects the intended behavior regarding the mandatory actions list.\n\n4. **Propose a Fix**:\n - Update the parser to correctly handle the `nolog` action as non-disruptive and allow the use of `SecRuleScript` without mandatory actions.\n - Modify the error handling to provide clearer feedback when actions are missing, rather than deferring the error to the next rule.\n\n5. **Test the Solution**: After implementing changes, retest with the original rule block to confirm that the error no longer occurs and that the configuration file passes validation.\n\n6. 
**Document Changes**: Ensure that any modifications to the behavior of `SecRuleScript` are well documented for future reference, clarifying the optional nature of actions. \n\n7. **Engage with the Community**: Share the findings and proposed solutions in the GitHub issue discussion to gather feedback and further insights from other developers. \n\nBy tackling these steps, address the confusion surrounding `SecRuleScript` actions and improve the user experience with ModSecurity and nginx configurations.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3108", @@ -63100,8 +65297,8 @@ 976 ], "labels": [ - 399, - 400 + 400, + 399 ] } }, @@ -63114,6 +65311,7 @@ "node_id": "I_kwDOABQmks6C1J1y", "title": "libmodsecurity3: Request body is not logged", "body": "**Describe the bug**\r\n\r\nlibModSecurity3 does not log the request body in the audit log, although the triggered rules, response body and request/response headers are logged.\r\n\r\n**Logs and dumps**\r\n\r\n```\r\n---38ayZkm8---A--\r\n[19/Mar/2024:13:20:01 +0000] 171085440197.908971 127.0.0.1 45226 127.0.0.1 80\r\n---38ayZkm8---B--\r\nPOST / HTTP/1.1\r\nHost: 127.0.0.1\r\nUser-Agent: curl/7.81.0\r\nAccept: */*\r\nContent-Length: 10\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n---38ayZkm8---D--\r\n\r\n---38ayZkm8---E--\r\n\\x0d\\x0a403 Forbidden\\x0d\\x0a\\x0d\\x0a
403 Forbidden\\x0d\\x0anginx/1.18.0 (Ubuntu)
\\x0d\\x0a\\x0d\\x0a\\x0d\\x0a\r\n\r\n---38ayZkm8---F--\r\nHTTP/1.1 403\r\nServer: nginx/1.18.0 (Ubuntu)\r\nDate: Tue, 19 Mar 2024 13:20:01 GMT\r\nContent-Length: 162\r\nContent-Type: text/html\r\nConnection: keep-alive\r\n\r\n---38ayZkm8---H--\r\nModSecurity: Warning. Matched \"Operator `Rx' with parameter `(?:^([\\d.]+|\\[[\\da-f:]+\\]|[\\da-f:]+)(:[\\d]+)?$)' against variable `REQUEST_HEADERS:Host' (Value: `127.0.0.1' ) [file \"/etc/modsecurity/coreruleset/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf\"] [line \"772\"] [id \"920350\"] [rev \"\"] [msg \"Host header is a numeric IP address\"] [data \"127.0.0.1\"] [severity \"4\"] [ver \"OWASP_CRS/4.0.1-dev\"] [maturity \"0\"] [accuracy \"0\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-protocol\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/210/272\"] [tag \"PCI/6.5.10\"] [hostname \"127.0.0.1\"] [uri \"/\"] [unique_id \"171085440197.908971\"] [ref \"o0,9o0,9v22,9\"]\r\nModSecurity: Warning. detected XSS using libinjection. [file \"/etc/modsecurity/coreruleset/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\"] [line \"82\"] [id \"941100\"] [rev \"\"] [msg \"XSS Attack Detected via libinjection\"] [data \"Matched Data: XSS data found within ARGS:a: phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-protocol\"] [tag \"paranoia-level/3\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/210/272\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.777743 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Found 78 byte(s) in ARGS:a outside range: 38,44-46,48-58,61,65-90,95,97-122. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf\"] [line \"1815\"] [id \"920273\"] [msg \"Invalid character in request (outside of very strict set)\"] [data \"ARGS:a=1&authDomain='`\\\\x22>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-protocol\"] [tag \"paranoia-level/4\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/210/272\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.778148 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?i)(?:[/\\\\\\\\x5c]|%(?:2(?:f|5(?:2f|5c|c(?:1%259c|0%25af))|%46)|5c|c(?:0%(?:[2aq]f|5c|9v)|1%(?:[19p]c|8s|af))|(?:bg%q|(?:e|f(?:8%8)?0%8)0%80%a)f|u(?:221[56]|EFC8|F025|002f)|%3(?:2(?:%(?:%6|4)6|F)|5%%63)|1u)|0x(?:2f|5c))(?:\\\\\\\\.(?:%0[01]|\\\\\\\\?)?|\\\\\\\\?\\\\\\\\.?|%(?:2( ...\" at ARGS:a. 
[file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-930-APPLICATION-ATTACK-LFI.conf\"] [line \"53\"] [id \"930100\"] [msg \"Path Traversal Attack (/../) or (/.../)\"] [data \"Matched Data: %2F..%2F found within ARGS:a: 1&authDomain=%27%60%22><%2fscript>%2f%2f&delete=%27%60%22><%2fscript>%3Cphp%3Ephpinfo%28%29%3B%3C%2Fphp%3E%5Cr%5Cn%5Ccrlf%5C%2522OR%25201%3D1%3B..%2F..%2F..%2F..%2F..%2F..%2Fetc%2Fpasswd%26dos%3Dcom.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [t [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.778339 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?:(?:^|[\\\\\\\\x5c/;])\\\\\\\\.{2,3}[\\\\\\\\x5c/;]|[\\\\\\\\x5c/;]\\\\\\\\.{2,3}(?:[\\\\\\\\x5c/;]|$))\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-930-APPLICATION-ATTACK-LFI.conf\"] [line \"86\"] [id \"930110\"] [msg \"Path Traversal Attack (/../) or (/.../)\"] [data \"Matched Data: ;../ found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-lfi\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/255/153/126\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.778435 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?:(?:^|[\\\\\\\\x5c/;])\\\\\\\\.{2,3}[\\\\\\\\x5c/;]|[\\\\\\\\x5c/;]\\\\\\\\.{2,3}(?:[\\\\\\\\x5c/;]|$))\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-930-APPLICATION-ATTACK-LFI.conf\"] [line \"86\"] [id \"930110\"] [msg \"Path Traversal Attack (/../) or (/.../)\"] [data \"Matched Data: /../ found within ARGS:a: 1&authdomain=`>phpinfo() rncrlf%22or%201=1 ../../../../../../etc/passwd&dos=com.java.bean.templateinjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-lfi\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/255/153/126\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.779233 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?i)(?:b[\\\\\"'\\\\\\\\)\\\\\\\\[\\\\\\\\x5c]*(?:(?:(?:\\\\\\\\|\\\\\\\\||&&)[\\\\\\\\s\\\\\\\\x0b]*)?\\\\\\\\$[!#\\\\\\\\(\\\\\\\\*\\\\\\\\-0-9\\\\\\\\?@_a-\\\\\\\\{]*)?\\\\\\\\x5c?u[\\\\\"'\\\\\\\\)\\\\\\\\[\\\\\\\\x5c]*(?:(?:(?:\\\\\\\\|\\\\\\\\||&&)[\\\\\\\\s\\\\\\\\x0b]*)?\\\\\\\\$[!#\\\\\\\\(\\\\\\\\*\\\\\\\\-0-9\\\\\\\\?@_a-\\\\\\\\{]*)?\\\\\\\\x5c?s[\\\\\"'\\\\\\\\)\\\\\\\\[\\\\\\\\x5c]*(?:(?:(?:\\\\\\\\|\\\\\\\\||&&)[\\\\\\\\s\\\\\\\\x0b]*)?\\\\\\\\$[!#\\\\\\\\(\\\\\\\\*\\\\\\\\-0-9\\\\ ...\" at ARGS:a. 
[file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf\"] [line \"200\"] [id \"932235\"] [msg \"Remote Command Execution: Unix Command Injection (command without evasion)\"] [data \"Matched Data: =open(../etc/passwd found within ARGS:a: 1&authDomain=%27%60%22><%2fscript>%2f%2f&delete=%27%60%22><%2fscript>%3Cphp%3Ephpinfo%28%29%3B%3C%2Fphp%3E%5Cr%5Cn%5Ccrlf%5C%2522OR%25201%3D1%3B..%2F..%2F..%2F..%2F..%2F..%2Fetc%2Fpasswd%26dos%3Dcom.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [ [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.779926 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Matched phrase \"etc/passwd\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf\"] [line \"575\"] [id \"932160\"] [msg \"Remote Command Execution: Unix Shell Code Found\"] [data \"Matched Data: etc/passwd found within ARGS:a: 1&authdomain=%27%60%22><%2fscript>%2f%2f&delete=%27%60%22><%2fscript>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c\\\\x22or 1=1;../../../../../../etc/passwd&dos=com.java.bean.templateinjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-rce\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/248/88\"] [tag \"PCI/6.5.2\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.780762 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?i)(?:^|b[\\\\\"'\\\\\\\\)\\\\\\\\[\\\\\\\\x5c]*(?:(?:(?:\\\\\\\\|\\\\\\\\||&&)[\\\\\\\\s\\\\\\\\x0b]*)?\\\\\\\\$[!#\\\\\\\\(\\\\\\\\*\\\\\\\\-0-9\\\\\\\\?@_a-\\\\\\\\{]*)?\\\\\\\\x5c?u[\\\\\"'\\\\\\\\)\\\\\\\\[\\\\\\\\x5c]*(?:(?:(?:\\\\\\\\|\\\\\\\\||&&)[\\\\\\\\s\\\\\\\\x0b]*)?\\\\\\\\$[!#\\\\\\\\(\\\\\\\\*\\\\\\\\-0-9\\\\\\\\?@_a-\\\\\\\\{]*)?\\\\\\\\x5c?s[\\\\\"'\\\\\\\\)\\\\\\\\[\\\\\\\\x5c]*(?:(?:(?:\\\\\\\\|\\\\\\\\||&&)[\\\\\\\\s\\\\\\\\x0b]*)?\\\\\\\\$[!#\\\\\\\\(\\\\\\\\*\\\\\\\\-0- ...\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf\"] [line \"1375\"] [id \"932236\"] [msg \"Remote Command Execution: Unix Command Injection (command without evasion)\"] [data \"Matched Data: =open(../etc/passwd found within ARGS:a: 1&authDomain=%27%60%22><%2fscript>%2f%2f&delete=%27%60%22><%2fscript>%3Cphp%3Ephpinfo%28%29%3B%3C%2Fphp%3E%5Cr%5Cn%5Ccrlf%5C%2522OR%25201%3D1%3B..%2F..%2F..%2F..%2F..%2F..%2Fetc%2Fpasswd%26dos%3Dcom.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.781483 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Matched phrase \"phpinfo\" at ARGS:a. 
[file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-933-APPLICATION-ATTACK-PHP.conf\"] [line \"297\"] [id \"933150\"] [msg \"PHP Injection Attack: High-Risk PHP Function Name Found\"] [data \"Matched Data: phpinfo found within ARGS:a: 1&authDomain=%27%60%22><%2fscript>%2f%2f&delete=%27%60%22><%2fscript>%3Cphp%3Ephpinfo%28%29%3B%3C%2Fphp%3E%5Cr%5Cn%5Ccrlf%5C%2522OR%25201%3D1%3B..%2F..%2F..%2F..%2F..%2F..%2Fetc%2Fpasswd%26dos%3Dcom.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-php\"] [tag \"platform-multi\"] [tag \"attack-injection-php\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.781718 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Matched phrase \"(\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-933-APPLICATION-ATTACK-PHP.conf\"] [line \"549\"] [id \"933151\"] [msg \"PHP Injection Attack: Medium-Risk PHP Function Name Found\"] [data \"Matched Data: phpinfo found within ARGS:a: 1&authDomain=%27%60%22><%2fscript>%2f%2f&delete=%27%60%22><%2fscript>%3Cphp%3Ephpinfo%28%29%3B%3C%2Fphp%3E%5Cr%5Cn%5Ccrlf%5C%2522OR%25201%3D1%3B..%2F..%2F..%2F..%2F..%2F..%2Fetc%2Fpasswd%26dos%3Dcom.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-php\"] [tag \"platform-multi\"] [tag \"attack-injection-php\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.781943 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"Process[\\\\\\\\s\\\\\\\\x0b]*\\\\\\\\.[\\\\\\\\s\\\\\\\\x0b]*spawn[\\\\\\\\s\\\\\\\\x0b]*\\\\\\\\(\" at REQUEST_COOKIES:If. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-934-APPLICATION-ATTACK-GENERIC.conf\"] [line \"173\"] [id \"934150\"] [msg \"Ruby Injection Attack\"] [data \"Matched Data: Process.spawn( found within REQUEST_COOKIES:If: while(Process.spawn(1337)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-ruby\"] [tag \"platform-multi\"] [tag \"attack-rce\"] [tag \"attack-injection-generic\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782000 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?:close|exists|fork|(?:ope|spaw)n|re(?:ad|quire)|w(?:atch|rite))[\\\\\\\\s\\\\\\\\x0b]*\\\\\\\\(\" at REQUEST_COOKIES:If. 
[file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-934-APPLICATION-ATTACK-GENERIC.conf\"] [line \"263\"] [id \"934101\"] [msg \"Node.js Injection Attack 2/2\"] [data \"Matched Data: spawn( found within REQUEST_COOKIES:If: while(Process.spawn(1337)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-javascript\"] [tag \"platform-multi\"] [tag \"attack-rce\"] [tag \"attack-injection-generic\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782052 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?:close|exists|fork|(?:ope|spaw)n|re(?:ad|quire)|w(?:atch|rite))[\\\\\\\\s\\\\\\\\x0b]*\\\\\\\\(\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-934-APPLICATION-ATTACK-GENERIC.conf\"] [line \"263\"] [id \"934101\"] [msg \"Node.js Injection Attack 2/2\"] [data \"Matched Data: open( found within ARGS:a: 1&authDomain=%27%60%22><%2fscript>%2f%2f&delete=%27%60%22><%2fscript>%3Cphp%3Ephpinfo%28%29%3B%3C%2Fphp%3E%5Cr%5Cn%5Ccrlf%5C%2522OR%25201%3D1%3B..%2F..%2F..%2F..%2F..%2F..%2Fetc%2Fpasswd%26dos%3Dcom.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-javascript\"] [tag \"platform-multi\"] [tag \"attack-rce\"] [tag \"attack-injection-generic\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782104 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?:close|exists|fork|(?:ope|spaw)n|re(?:ad|quire)|w(?:atch|rite))[\\\\\\\\s\\\\\\\\x0b]*\\\\\\\\(\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-934-APPLICATION-ATTACK-GENERIC.conf\"] [line \"263\"] [id \"934101\"] [msg \"Node.js Injection Attack 2/2\"] [data \"Matched Data: open( found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-javascript\"] [tag \"platform-multi\"] [tag \"attack-rce\"] [tag \"attack-injection-generic\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782151 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?:close|exists|fork|(?:ope|spaw)n|re(?:ad|quire)|w(?:atch|rite))[\\\\\\\\s\\\\\\\\x0b]*\\\\\\\\(\" at ARGS:a. 
[file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-934-APPLICATION-ATTACK-GENERIC.conf\"] [line \"263\"] [id \"934101\"] [msg \"Node.js Injection Attack 2/2\"] [data \"Matched Data: open( found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x0d\\\\x0acrlf%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-javascript\"] [tag \"platform-multi\"] [tag \"attack-rce\"] [tag \"attack-injection-generic\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782284 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. detected XSS using libinjection. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\"] [line \"100\"] [id \"941100\"] [msg \"XSS Attack Detected via libinjection\"] [data \"Matched Data: XSS data found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x0d\\\\x0acrlf%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-xss\"] [tag \"xss-perf-disable\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782339 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?i)]*>[\\\\\\\\s\\\\\\\\S]*?\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\"] [line \"127\"] [id \"941110\"] [msg \"XSS Filter - Category 1: Script Tag Vector\"] [data \"Matched Data: phpinfo();\\\\x0d\\\\x0acrlf%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-xss\"] [tag \"xss-perf-disable\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782481 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?i)<[^0-9<>A-Z_a-z]*(?:[^\\\\\\\\s\\\\\\\\x0b\\\\\"'<>]*:)?[^0-9<>A-Z_a-z]*[^0-9A-Z_a-z]*?(?:s[^0-9A-Z_a-z]*?(?:c[^0-9A-Z_a-z]*?r[^0-9A-Z_a-z]*?i[^0-9A-Z_a-z]*?p[^0-9A-Z_a-z]*?t|t[^0-9A-Z_a-z]*?y[^0-9A-Z_a-z]*?l[^0-9A-Z_a-z]*?e|v[^0-9A-Z_a-z]*?g|e[^0-9A-Z_a-z]*?t[^0- ...\" at ARGS:a. 
[file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\"] [line \"219\"] [id \"941160\"] [msg \"NoScript XSS InjectionChecker: HTML Injection\"] [data \"Matched Data: '`\\\\x22>phpinfo();\\\\x0d\\\\x0acrlf%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\" [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782859 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?i)\\\\\\\\b(?:s(?:tyle|rc)|href)\\\\\\\\b[\\\\\\\\s\\\\\\\\S]*?=\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\"] [line \"853\"] [id \"941150\"] [msg \"XSS Filter - Category 5: Disallowed HTML Attributes\"] [data \"Matched Data: style>phpinfo();\\\\x0d\\\\x0acrlf%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-xss\"] [tag \"xss-perf-disable\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/242\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.782937 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"<(?:a|abbr|acronym|address|applet|area|audioscope|b|base|basefront|bdo|bgsound|big|blackface|blink|blockquote|body|bq|br|button|caption|center|cite|code|col|colgroup|comment|dd|del|dfn|dir|div|dl|dt|em|embed|fieldset|fn|font|form|frame|frameset|h1|head ...\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\"] [line \"969\"] [id \"941320\"] [msg \"Possible XSS Attack Detected - HTML Tag Handler\"] [data \"Matched Data: <%2fscript>%2f%2f&delete=%27%60%22><%2fscript>%3cphp%3ephpinfo%28%29%3b%3c%2fphp%3e%5cr%5cn%5ccrlf%5c%2522or%25201%3d1%3b..%2f..%2f..%2f..%2f..%2f..%2fetc%2fpasswd%26dos%3dcom.java.bean.templateinjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-m [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.783020 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. 
detected SQLi using libinjection with fingerprint 'T(n(1' [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\"] [line \"66\"] [id \"942100\"] [msg \"SQL Injection Attack Detected via libinjection\"] [data \"Matched Data: T(n(1 found within REQUEST_COOKIES:If: while(Process.spawn(1337)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-sqli\"] [tag \"paranoia-level/1\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/248/66\"] [tag \"PCI/6.5.2\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.783467 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Match of \"streq %{TX.2}\" against \"TX:1\" required. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\"] [line \"742\"] [id \"942131\"] [msg \"SQL Injection Attack: SQL Boolean-based attack detected\"] [data \"Matched Data: php>phpinfo found within ARGS:a: php\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-sqli\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/248/66\"] [tag \"PCI/6.5.2\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.784084 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"((?:[~!@#\\\\\\\\$%\\\\\\\\^&\\\\\\\\*\\\\\\\\(\\\\\\\\)\\\\\\\\-\\\\\\\\+=\\\\\\\\{\\\\\\\\}\\\\\\\\[\\\\\\\\]\\\\\\\\|:;\\\\\"'\\\\xc2\\\\xb4\\\\xe2\\\\x80\\\\x99\\\\xe2\\\\x80\\\\x98`<>][^~!@#\\\\\\\\$%\\\\\\\\^&\\\\\\\\*\\\\\\\\(\\\\\\\\)\\\\\\\\-\\\\\\\\+=\\\\\\\\{\\\\\\\\}\\\\\\\\[\\\\\\\\]\\\\\\\\|:;\\\\\"'\\\\xc2\\\\xb4\\\\xe2\\\\x80\\\\x99\\\\xe2\\\\x80\\\\x98`<>]*?){12})\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\"] [line \"1315\"] [id \"942430\"] [msg \"Restricted SQL Character Anomaly Detection (args): # of special characters exceeded (12)\"] [data \"Matched Data: &authDomain='`\\\\x22> found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"WARNING\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.784185 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"(?i)[\\\\\"'`][\\\\\\\\s\\\\\\\\x0b]*?(?:(?:is[\\\\\\\\s\\\\\\\\x0b]+not|not[\\\\\\\\s\\\\\\\\x0b]+(?:like|glob|(?:betwee|i)n|null|regexp|match)|mod|div|sounds[\\\\\\\\s\\\\\\\\x0b]+like)\\\\\\\\b|[%&\\\\\\\\*\\\\\\\\+\\\\\\\\-/<->\\\\\\\\^\\\\\\\\|])\" at ARGS:a. 
[file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\"] [line \"1509\"] [id \"942520\"] [msg \"Detects basic SQL authentication bypass attempts 4.0/4\"] [data \"Matched Data: \\\\x22> found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"CRITICAL\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-sqli\"] [tag \"paranoia-level/2\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/248/66\"] [ta [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.784279 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"((?:[~!@#\\\\\\\\$%\\\\\\\\^&\\\\\\\\*\\\\\\\\(\\\\\\\\)\\\\\\\\-\\\\\\\\+=\\\\\\\\{\\\\\\\\}\\\\\\\\[\\\\\\\\]\\\\\\\\|:;\\\\\"'\\\\xc2\\\\xb4\\\\xe2\\\\x80\\\\x99\\\\xe2\\\\x80\\\\x98`<>][^~!@#\\\\\\\\$%\\\\\\\\^&\\\\\\\\*\\\\\\\\(\\\\\\\\)\\\\\\\\-\\\\\\\\+=\\\\\\\\{\\\\\\\\}\\\\\\\\[\\\\\\\\]\\\\\\\\|:;\\\\\"'\\\\xc2\\\\xb4\\\\xe2\\\\x80\\\\x99\\\\xe2\\\\x80\\\\x98`<>]*?){6})\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\"] [line \"1803\"] [id \"942431\"] [msg \"Restricted SQL Character Anomaly Detection (args): # of special characters exceeded (6)\"] [data \"Matched Data: &authDomain='`\\\\x22> found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"WARNING\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-sqli\"] [tag \"paran [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.784330 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Pattern match \"\\\\\\\\W{4}\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\"] [line \"1833\"] [id \"942460\"] [msg \"Meta-Character Anomaly Detection Alert - Repetitive Non-Word Characters\"] [data \"Matched Data: ='`\\\\x22 found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"WARNING\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-sqli\"] [tag \"paranoia-level/3\"] [tag \"OWASP_CRS\"] [tag \"capec/1000/152/248/66\"] [tag \"PCI/6.5.2\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.784394 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. 
Pattern match \"((?:[~!@#\\\\\\\\$%\\\\\\\\^&\\\\\\\\*\\\\\\\\(\\\\\\\\)\\\\\\\\-\\\\\\\\+=\\\\\\\\{\\\\\\\\}\\\\\\\\[\\\\\\\\]\\\\\\\\|:;\\\\\"'\\\\xc2\\\\xb4\\\\xe2\\\\x80\\\\x99\\\\xe2\\\\x80\\\\x98`<>][^~!@#\\\\\\\\$%\\\\\\\\^&\\\\\\\\*\\\\\\\\(\\\\\\\\)\\\\\\\\-\\\\\\\\+=\\\\\\\\{\\\\\\\\}\\\\\\\\[\\\\\\\\]\\\\\\\\|:;\\\\\"'\\\\xc2\\\\xb4\\\\xe2\\\\x80\\\\x99\\\\xe2\\\\x80\\\\x98`<>]*?){2})\" at ARGS:a. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf\"] [line \"1975\"] [id \"942432\"] [msg \"Restricted SQL Character Anomaly Detection (args): # of special characters exceeded (2)\"] [data \"Matched Data: &authDomain= found within ARGS:a: 1&authDomain='`\\\\x22>phpinfo();\\\\x5cr\\\\x5cn\\\\x5ccrlf\\\\x5c%22OR%201=1;../../../../../../etc/passwd&dos=com.java.bean.TemplateInjection&process.appendfile=open(../etc/passwd)\"] [severity \"WARNING\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"application-multi\"] [tag \"language-multi\"] [tag \"platform-multi\"] [tag \"attack-sqli\"] [tag \"paranoia-lev [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.784783 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Access denied with code 403 (phase 2). Operator GE matched 5 at TX:blocking_inbound_anomaly_score. [file \"/etc/modsecurity.d/owasp-crs/rules/REQUEST-949-BLOCKING-EVALUATION.conf\"] [line \"233\"] [id \"949110\"] [msg \"Inbound Anomaly Score Exceeded (Total Score: 141)\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"anomaly-evaluation\"] [tag \"OWASP_CRS\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n[Mon Mar 25 13:35:58.784997 2024] [security2:error] [pid 60:tid 140003602568896] [client 192.168.122.158:59728] [client 192.168.122.158] ModSecurity: Warning. Unconditional match in SecAction. [file \"/etc/modsecurity.d/owasp-crs/rules/RESPONSE-980-CORRELATION.conf\"] [line \"98\"] [id \"980170\"] [msg \"Anomaly Scores: (Inbound Scores: blocking=141, detection=141, per_pl=58-61-11-11, threshold=5) - (Outbound Scores: blocking=0, detection=0, per_pl=0-0-0-0, threshold=4) - (SQLI=30, XSS=25, RFI=0, LFI=15, RCE=45, PHPI=10, HTTP=0, SESS=0, COMBINED_SCORE=141)\"] [ver \"OWASP_CRS/4.1.0\"] [tag \"modsecurity\"] [tag \"reporting\"] [tag \"OWASP_CRS\"] [hostname \"192.168.122.158\"] [uri \"/\"] [unique_id \"ZgF9vvENo2IGM29MSPNYswAAAJc\"]\r\n```\r\n\r\n**Expected behavior**\r\n\r\nModsecurity has the ability to limit the rules by using a parameter.\r\n\r\n**Server (please complete the following information):**\r\n\r\n - ModSecurity version (and connector): 2\r\n - WebServer: Apache\r\n - OS (and distro): Docker `image: owasp/modsecurity-crs:apache`\r\n\r\n\r\n**Rule Set (please complete the following information):**\r\nFollowing Rule Set was used (commit f2ab9c3063fece423e6a4156aad145f7f7e6ef96) [CRS Rules](https://github.com/coreruleset/coreruleset/tree/main/rules)\r\n", + "summary": "Implement a feature to limit the number of rules processed per request in ModSecurity to mitigate potential denial-of-service (DoS) attacks. 
The current implementation allows a single request to trigger numerous rules (e.g., 125 rules for a short request), leading to high resource consumption and possible system overload, especially in SIEM or monitoring systems.\n\n### Technical Details:\n- The issue arises when specific keywords in requests match multiple ModSecurity rules, as evidenced by logs showing various types of attacks including SQL injection (SQLI), cross-site scripting (XSS), remote file inclusion (RFI), local file inclusion (LFI), and remote command execution (RCE).\n- For instance, the log output indicates 30 SQLI rules, 25 XSS rules, and high combined scores leading to an overall score of 141, prompting a 403 response due to exceeding the configured anomaly score.\n- The ModSecurity version in use is 2, with the OWASP Core Rule Set (CRS) applied.\n\n### First Steps to Tackle the Problem:\n1. **Parameter Addition**: Introduce a new parameter in ModSecurity's configuration to limit the number of rules processed per request. This could be a simple integer value that specifies the maximum number of rules to evaluate.\n \n2. **Modify Rule Evaluation Logic**: Adjust the existing rule evaluation mechanism to respect the new limit. If the limit is reached, subsequent rules should be skipped for that request.\n\n3. **Test the Implementation**: \n - Use the provided Docker compose script to replicate the environment and simulate requests that trigger multiple rules.\n - Validate that the limit is enforced and that the system behaves as expected without exceeding resource constraints.\n\n4. **Update Documentation**: Document the new parameter and its usage in the ModSecurity configuration guide to ensure users can effectively utilize this feature.\n\n5. **Consider Default Values**: Define a reasonable default for the new parameter to balance security and performance, allowing users to adjust based on their environment's needs.\n\nBy following these steps, you can effectively implement a feature that enhances the resilience of ModSecurity against potential DoS attacks triggered by excessive rule processing.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3112", @@ -63172,6 +65371,7 @@ "node_id": "I_kwDOABQmks6D66z2", "title": "Error: Could not set variable \"ip.brute_force_counter\" and Could not set variable \"ip.xmlrpc_counter\" as the collection does not exist.", "body": "Hello,\r\n\r\nI'm having issue with Modsecurity 2.9 (it is installed into a plesk server latest version with latest updates)\r\n\r\nOn every single visit I got a register with that 2 errors:\r\n```\r\nMessage: Could not set variable \"ip.xmlrpc_counter\" as the collection does not exist.\r\nMessage: Could not set variable \"ip.brute_force_counter\" as the collection does not exist.\r\nApache-Error: [file \"apache2_util.c\"] [line 277] [level 3] [client 47.128.63.199] ModSecurity: Could not set variable \"ip.xmlrpc_counter\" as the collection does not exist. [hostname \"c*******.com\"] [uri \"/img/logo-1660745973.jpg\"] [unique_id \"ZgVgQVdtYdAwqReLwcyDAwAAABc\"]\r\nApache-Error: [file \"apache2_util.c\"] [line 277] [level 3] [client 47.128.63.199] ModSecurity: Could not set variable \"ip.brute_force_counter\" as the collection does not exist. 
[hostname \"c*********.com\"] [uri \"/img/logo-1660745973.jpg\"] [unique_id \"ZgVgQVdtYdAwqReLwcyDAwAAABc\"]\r\nStopwatch: 1711628353557358 808 (- - -)\r\nStopwatch2: 1711628353557358 808; combined=108, p1=50, p2=42, p3=0, p4=0, p5=16, sr=0, sw=0, l=0, gc=0\r\nProducer: ModSecurity for Apache/2.9.7 (http://www.modsecurity.org/); CWAF_Apache.\r\nServer: Apache\r\nEngine-Mode: \"DETECTION_ONLY\"\r\n```\r\n\r\nTried to find info about, but can't find one single result from google nor bing, can please someone helpme to solve this?", + "summary": "Investigate the ModSecurity error related to the variables \"ip.brute_force_counter\" and \"ip.xmlrpc_counter\" not being set due to nonexistent collections.\n\n1. Confirm that ModSecurity is properly installed and configured on your Plesk server. Ensure you are using the latest version (2.9.7).\n2. Check your ModSecurity configuration files for any rules that reference the \"ip.brute_force_counter\" and \"ip.xmlrpc_counter\" variables. Ensure these rules are enabled and not commented out.\n3. Verify if the necessary collections for these variables are defined in the configuration. If they are missing, you need to create them. Use the following directive in your ModSecurity configuration:\n ```apache\n SecCollection \"ip.brute_force_counter\"\n SecCollection \"ip.xmlrpc_counter\"\n ```\n4. Restart Apache to ensure changes take effect:\n ```bash\n sudo systemctl restart apache2\n ```\n5. Monitor the Apache error logs to check if the errors persist after making changes:\n ```bash\n tail -f /var/log/apache2/error.log\n ```\n\nIf errors continue, consider enabling debug logging for ModSecurity to gather more detailed information about the issue. Use the following directive:\n```apache\nSecDebugLog /var/log/modsec_debug.log\nSecDebugLogLevel 9\n```\n\nAfter enabling debug logging, reproduce the issue and review the debug log for additional insights into the cause of the errors.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3115", @@ -63200,6 +65400,7 @@ "node_id": "I_kwDOABQmks6EJYJU", "title": "Is it possible to change the SecAuditLogStorageDir variable so that the logs are sorted by vhost?", "body": "Hello.\r\n\r\nI have libapache2-mod-security2 2.9.7-1 installed on debian 12.\r\nIn the configuration file /etc/modsecurity/modsecurity.conf\r\nDefault `SecAuditLogStorageDir = /opt/modsecurity/var/audit`\r\nand all logs are written together, regardless of vhost.\r\nIs it possible to sort by vhost?\r\n\r\n```\r\n/opt/modsecurity/var/audit/site1.com/\r\n/opt/modsecurity/var/audit/site2.com/\r\n```\r\n...\r\n\r\nThen, as an option to expand the functionality, you can add variables:\r\n```\r\n$vhost\r\n$year\r\n$month\r\n$day\r\n$hour\r\n$minute\r\n$second\r\n$id - some kind of unique identifier that is added to the end of the log name\r\n```\r\n\r\nso that you can create different options for log storage paths:\r\n```\r\n/opt/modsecurity/var/audit/$vhost/$year-$month-$day/$hour-$minute-$second-$ID.log\r\n/opt/modsecurity/var/audit/site.com/2024-03-31/11-51-03-ZgkZYEAFt1ApFkqHlmHjUgAAAAE.log\r\n```\r\nor\r\n```\r\n/opt/modsecurity/var/audit/$vhost/$year-$month/$day/$hour-$minute-$second-$ID.log\r\n/opt/modsecurity/var/audit/site.com/2024-03/31/11-51-03-ZgkZYEAFt1ApFkqHlmHjUgAAAAE.log\r\n```\r\n\r\nThx.", + "summary": "Change the `SecAuditLogStorageDir` variable to allow logs to be organized by vhost. 
The current setup in `/etc/modsecurity/modsecurity.conf` saves all logs to a single directory `/opt/modsecurity/var/audit`, which does not differentiate between virtual hosts.\n\n1. Verify the version of libapache2-mod-security2: Confirm you are using version 2.9.7-1 on Debian 12.\n2. Implement a variable-based logging system: Modify the logging configuration to incorporate dynamic variables such as `$vhost`, `$year`, `$month`, `$day`, `$hour`, `$minute`, `$second`, and `$id` for creating structured log paths.\n3. Define log path patterns: Consider implementing the following patterns to enhance log organization:\n - `/opt/modsecurity/var/audit/$vhost/$year-$month-$day/$hour-$minute-$second-$ID.log`\n - `/opt/modsecurity/var/audit/$vhost/$year-$month/$day/$hour-$minute-$second-$ID.log`\n4. Update the configuration: Make necessary changes in the `modsecurity.conf` file to use the new path patterns.\n5. Test the configuration: Restart the Apache service and test to ensure logs are being written to the new directories as intended.\n\nBy following these steps, you can successfully organize your ModSecurity logs by virtual host and improve log management.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3118", @@ -63230,6 +65431,7 @@ "node_id": "I_kwDOABQmks6En_1O", "title": "Lua installed, but Modsecurity still dont work with it ", "body": "I have installed Lua 5.4, Nginx 1.20.1, Modsecurity V3 and this is my config Modsecurity \r\n\r\n```\r\nModSecurity - v3.0.12-33-g625f9a53 for Linux\r\n\r\n Mandatory dependencies\r\n + libInjection ....v3.9.2-46-gbfba51f\r\n + SecLang tests ....a3d4405\r\n\r\n Optional dependencies\r\n + GeoIP/MaxMind ....found\r\n * (MaxMind) v1.5.2\r\n -lmaxminddb , -DWITH_MAXMIND\r\n + LibCURL ....found v7.76.1\r\n -lcurl, -DWITH_CURL_SSLVERSION_TLSv1_2 -DWITH_CURL\r\n + YAJL ....found v2.1.0\r\n -lyajl , -DWITH_YAJL\r\n + LMDB ....disabled\r\n + LibXML2 ....found v2.9.13\r\n -lxml2 , -I/usr/include/libxml2 -DWITH_LIBXML2\r\n + SSDEEP ....found\r\n -lfuzzy -L/usr/lib64/, -DWITH_SSDEEP -I/usr/include\r\n + LUA ....found v504\r\n -llua-5.4 -L/usr/lib64/, -DWITH_LUA -DWITH_LUA_5_4 -I/usr/include\r\n + PCRE2 ....disabled\r\n\r\n Other Options\r\n + Test Utilities ....enabled\r\n + SecDebugLog ....enabled\r\n + afl fuzzer ....disabled\r\n + library examples ....enabled\r\n + Building parser ....disabled\r\n + Treating pm operations as critical section ....disabled\r\n```\r\n\r\nLog Nginx :\r\n```\r\nnginx: [emerg] \"modsecurity_rules_file\" directive Rules error. File: . Line: 1. Column: 0. Failed to load script: Lua support was not enabled.\r\n```\r\n\r\nMy Rule :\r\n```\r\nSecRuleScript /opt/test.lua \"id: 100, pass\"\r\n```\r\n\r\nplease help me", + "summary": "Ensure Lua support is enabled in ModSecurity configuration. Follow these steps to troubleshoot and resolve the issue:\n\n1. **Verify Lua Installation**: Confirm that Lua 5.4 is correctly installed and accessible. Run `lua -v` in the terminal to check the version.\n\n2. **Recompile ModSecurity with Lua Support**:\n - Ensure that you have the necessary development libraries for Lua installed.\n - Rebuild ModSecurity by adding the `--with-lua` option during the configuration step. For example:\n ```\n ./configure --with-lua=/usr/local/include/lua5.4 --with-lua-lib=/usr/local/lib\n make\n make install\n ```\n\n3. 
**Check Nginx Configuration**:\n - Ensure that the `modsecurity_rules_file` directive is correctly set in the Nginx configuration file (typically found in `/etc/nginx/nginx.conf` or similar).\n - Example configuration:\n ```\n ModSecurity on;\n ModSecurityEngine On;\n ModSecurityConfig /etc/nginx/modsecurity.conf;\n modsecurity_rules_file /etc/nginx/modsecurity.conf;\n ```\n\n4. **Review Logs**:\n - After making changes, check Nginx error logs for any additional errors that may arise. Use `tail -f /var/log/nginx/error.log` to monitor logs in real time.\n\n5. **Test**:\n - Restart Nginx to apply the changes: `sudo systemctl restart nginx`.\n - Verify that ModSecurity is now loading the Lua script without errors.\n\n6. **Debugging**:\n - If the problem persists, enable ModSecurity debug logging to gather more detailed information about the error:\n ```\n SecDebugLog /var/log/modsec_debug.log\n SecDebugLogLevel 9\n ```\n\n7. **Consult Documentation**: Review the ModSecurity documentation regarding Lua integration for any specific requirements or additional configurations.\n\nBy following these steps, you should be able to enable Lua support in ModSecurity and resolve the loading issue with your Lua script.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3119", @@ -63258,6 +65460,7 @@ "node_id": "I_kwDOABQmks6GTrVH", "title": "Phasing out SecStatusEngine", "body": "A small discussion happened on the Slack #project-modsecurity from OWASP where I pointed out that, with TW changing ownership to OWASP of modsecurity, the domain name might need to be transmitted so that the SecStatusEngine option (https://github.com/owasp-modsecurity/ModSecurity/wiki/Reference-Manual-(v2.x)#secstatusengine), if this is still used, is still working as expected.\r\n\r\nAccording to @fzipi, this option should be disabled since a long time ago. This has been done in the default config 2 months ago by @airween (https://github.com/owasp-modsecurity/ModSecurity/commit/f850932f83d47a68137ab207e6db0f217152c707).\r\n\r\n@dune73 mentioned that this domain will be still in the hands of TW until Summer 2024, and that TW is not having their status engine in operation for quite some time.\r\n\r\nThat being said, we all pretty much agree that:\r\n- TW status engine will not come back.\r\n- This is now disabled by default, so very little people are going to turn it on.\r\n- This option will not be useful anymore, and will just pollute (if enabled) the DNS.\r\n- This options is not supported in v3 (https://github.com/owasp-modsecurity/ModSecurity/wiki/Reference-Manual-(v3.x)#secstatusengine), and will not be created.\r\n- Functionally, it does not bring anything to the end user.\r\n\r\nThis is where I'm proposing removal of this option from v2, while knowing this operation should be carefully considered so that no configuration gets broken.\r\n\r\nProbably we could first warn about this option being deprecated, following by removing the actual logic while keeping the warning, and finally removing this option altogether from the parsing logic and the documentations.", + "summary": "**Remove SecStatusEngine Option**\n\n1. Acknowledge the consensus that the SecStatusEngine option is outdated and no longer useful, as it is disabled by default and has not been functional for some time.\n\n2. Confirm that the option will not be supported in version 3 of ModSecurity, thus solidifying its obsolescence.\n\n3. 
Plan the removal process in three stages:\n - **Stage 1:** Introduce a deprecation warning in the codebase to inform users about the impending removal of the SecStatusEngine option.\n - **Stage 2:** Remove the logic associated with SecStatusEngine while maintaining the warning, ensuring that existing configurations do not break immediately.\n - **Stage 3:** Fully remove the SecStatusEngine option from the parsing logic and all related documentation.\n\n4. Consider the impact on users and ensure thorough testing is conducted after each phase to confirm that no unintended issues arise during the transition.\n\n5. Document the process and keep stakeholders informed throughout the removal timeline to maintain transparency and gather feedback.\n\n6. Prepare a timeline for these changes, ensuring alignment with the release schedule for future versions of ModSecurity.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3122", @@ -63286,6 +65489,7 @@ "node_id": "I_kwDOABQmks6GUFjM", "title": "Regular Expression Failure Triggers `!@rx`", "body": "**Describe the bug**\r\n\r\nWhen there's a regular expression error due to `SecPcreMatchLimit` or `SecPcreMatchLimitRecursion` (i.e. `MSC_PCRE_LIMITS_EXCEEDED`), a rule using `!@rx` will say that the rule was triggered. However, failures with `@rx` will say that the rule was not triggered. I think both should assume the rule was not triggered. See https://github.com/coreruleset/coreruleset/issues/3640#issuecomment-2035841946 for additional context.\r\n\r\n**To Reproduce**\r\n\r\nSee https://github.com/coreruleset/coreruleset/issues/3640#issuecomment-2037347642.\r\n\r\nYou can probably reproduce by setting `SecPcreMatchLimit` and `SecPcreMatchLimitRecursion` really low (maybe 5) and adding a `!@rx` rule.\r\n\r\n**Expected behavior**\r\n\r\nI would expect `!@rx` to not trigger a rule if there's a `MSC_PCRE_LIMITS_EXCEEDED` error.\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): ModSecurity v3.0.12 with nginx-connector v1.0.3\r\n - WebServer: nginx-1.24.0\r\n - OS (and distro): Amazon Linux 2\r\n\r\n**Rule Set (please complete the following information):**\r\n - Running any public or commercial rule set? CRS\r\n - What is the version number? 4.1.0", + "summary": "**Fix Regular Expression Failure Handling for `!@rx` Rules**\n\n1. **Identify the issue**: When a regular expression error occurs due to `SecPcreMatchLimit` or `SecPcreMatchLimitRecursion`, the current behavior incorrectly triggers `!@rx` rules while `@rx` rules do not trigger. \n\n2. **Expected behavior**: Modify the logic so that both `!@rx` and `@rx` rules assume the rule was not triggered in case of `MSC_PCRE_LIMITS_EXCEEDED`.\n\n3. **Reproduce the bug**:\n - Set `SecPcreMatchLimit` and `SecPcreMatchLimitRecursion` to a low value (e.g., 5).\n - Create a rule that uses `!@rx`.\n - Observe that the rule triggers despite the regex error.\n\n4. **Technical steps to address the issue**:\n - Review the code segment responsible for handling regex errors in ModSecurity.\n - Implement a condition to check for `MSC_PCRE_LIMITS_EXCEEDED` specifically.\n - Ensure that the logic for both `@rx` and `!@rx` rules is consistent in handling this error.\n\n5. **Testing**:\n - After modifying the code, retest using the low limits and the `!@rx` rule.\n - Validate that the rule does not trigger on regex errors.\n\n6. 
**Documentation**: Update relevant documentation to reflect the changes in behavior for `!@rx` rules under regex failure scenarios. \n\n7. **Version Control**: Commit the changes and create a pull request for further review. \n\n8. **Monitor**: After deployment, monitor for any issues related to regex handling to ensure stability.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3123", @@ -63316,6 +65520,7 @@ "node_id": "I_kwDOABQmks6GoHT6", "title": "Incorrect utf8toUnicode transformation for 00xx", "body": "In utf8_unicode_inplace_ex(), we have the following code:\r\n```\r\nc = *utf;\r\n/* If first byte begins with binary 0 it is single byte encoding */\r\nif ((c & 0x80) == 0) {\r\n /* single byte unicode (7 bit ASCII equivilent) has no validation */\r\n count++;\r\n if (count <= len) {\r\n if (c == 0) *data = x2c(&c);\r\n else *data++ = c;\r\n }\r\n}\r\n```\r\nThe code `if (c == 0) *data = x2c(&c);` is wrong.\r\nutf the input string.\r\nIf input is \"x\", we copy the character => correct.\r\nIf input is \"\\x00\\xA0\", we call the x2c function which expects an hexadecimal string as input (like \"20\" for a space).\r\nFurthermore, the output buffer (data) is not increased, so the character is overwritten on the next loop.\r\nThe correct code is\r\n```\r\nif (c == 0) {\r\n sprintf(data, \"%%u%04x\", utf[1]);\r\n count += 4;\r\n data += 6;\r\n i++;\r\n *changed = 1;\r\n}\r\n```\r\n\r\nRemarks:\r\n\r\n1. Using \"%04x\" replaces a lot of useless code used in other parts of the code\r\n2. I didn't add a check `if (count <= len)` after `count += 4;` because it cannot overflow if we fix the code: `len = input_len * 6 + 1;` instead of `len = input_len * 4 + 1;` (4 bytes -> %u1234). The check (and the variable count) could thus be removed everywhere\r\n3. After filling data, we have several checks (/* invalid UTF-8 character number range (RFC 3629) */, /* check for overlong */, ...) with this code `count++; *data++ = c;` What's the point? This copies the character after the ones we already copied. This looks wrong to me. Any reason to keep this code?", + "summary": "Update the `utf8_unicode_inplace_ex()` function to correct the handling of the null byte (0x00) during UTF-8 to Unicode transformations. \n\n1. Replace the incorrect line:\n ```c\n if (c == 0) *data = x2c(&c);\n ```\n with the following block:\n ```c\n if (c == 0) {\n sprintf(data, \"%%u%04x\", utf[1]);\n count += 4;\n data += 6;\n i++;\n *changed = 1;\n }\n ```\n\n2. Ensure that the `len` parameter is set to `input_len * 6 + 1` to accommodate the new Unicode format. This change allows the removal of unnecessary checks for buffer overflow after incrementing the count.\n\n3. Review and possibly eliminate the checks after filling the data. Specifically, the existing code `count++; *data++ = c;` appears redundant, as it copies characters that have already been processed. Clarify the purpose of these checks and determine if they serve any valid function in the context.\n\n4. Implement unit tests to validate the transformation logic, especially for edge cases such as input strings containing null bytes and invalid UTF-8 sequences.\n\n5. 
Document the changes thoroughly to ensure clarity on the new logic and the reasoning behind the adjustments to the buffer size and character handling.\n\nStart by modifying the code as discussed and validating the results with appropriate test cases.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3129", @@ -63344,6 +65549,7 @@ "node_id": "I_kwDOABQmks6G1ChU", "title": "@rbl operator does not support IPv6", "body": "**Describe the bug**\r\n\r\nThis is a duplicated issue, the first one is more that 4 years old - see #2210.\r\n\r\nSeems like `@rbl` operator does not support IPv6.\r\n\r\nThere was the issue #3111, during the investigation I found this behavior.\r\n\r\n**Logs and dumps**\r\n\r\nThis is what I found the log while I tried the operator:\r\n\r\nlibmodsecurity3:\r\n```\r\nTarget value: \"::1\" (Variable: REMOTE_ADDR)\r\nFailed to understand `::1' as a valid IP address, assuming domain format input\r\nRBL lookup of ::1 failed.\r\n```\r\n\r\nmod-security2:\r\n```\r\nTarget value: \"::1\"\r\nRBL lookup of ::1.xbl.spamhaus.org failed at REMOTE_ADDR.\r\n```\r\n\r\nThe expected format would be:\r\n\r\n```\r\n1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ipv6.xbl.spamhaus.org\r\n```\r\n\r\n**To Reproduce**\r\n\r\nUse the rule set given at [this](https://github.com/owasp-modsecurity/ModSecurity/issues/3111#issuecomment-2018464289) issue comment, and send a request:\r\n\r\n```\r\ncurl -H \"Host: localhost\" http://[::1]/\r\n```\r\n\r\n**Expected behavior**\r\n\r\nIn the log it should be the expected format above, eg:\r\n\r\n```\r\nRBL lookup of 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ipv6.xbl.spamhaus.org failed at REMOTE_ADDR.\r\n```\r\n\r\n**Server (please complete the following information):**\r\n - ModSecurity version (and connector): libmodsecurity3 all versions, mod_security2 all versions\r\n", + "summary": "Fix the `@rbl` operator to support IPv6 addresses. Currently, it fails to recognize IPv6 format, as evidenced by logs indicating that `::1` is treated as a domain instead of a valid IP address. \n\n1. Investigate the existing implementation of the `@rbl` operator in both libmodsecurity3 and mod_security2 to identify the parsing logic for IP addresses.\n2. Create a function to convert IPv6 addresses into the expected format for RBL lookups, as shown:\n ```\n 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ipv6.xbl.spamhaus.org\n ```\n3. Modify the rule set to include test cases that utilize IPv6 addresses, ensuring compatibility.\n4. Test the operator with various IPv6 addresses using `curl` to simulate incoming requests, for example:\n ```\n curl -H \"Host: localhost\" http://[::1]/\n ```\n5. Validate that the log outputs the transformed format correctly, confirming the RBL lookup functionality for IPv6.\n\nStart by reviewing existing issues related to IPv6 support and examine the codebase for relevant parsing functions.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3131", @@ -63361,11 +65567,11 @@ 976 ], "labels": [ - 400, + 418, + 419, 398, 399, - 418, - 419 + 400 ] } }, @@ -63378,6 +65584,7 @@ "node_id": "I_kwDOABQmks6G-bqd", "title": "[Idea] Add variable support for SecAuditLog", "body": "I would like to provide a log file separately for each user and their website. 
Is there a chance to do this using variables SecAuditLog?", + "summary": "Implement variable support for SecAuditLog to enable separate log files for each user and their respective websites. \n\n1. Modify the SecAuditLog directive to accept variables that can dynamically generate log file names based on user identifiers or website domains.\n2. Consider using Apache's environment variables or request variables to capture user-specific data.\n3. Implement logic to handle the creation of log directories if they don’t exist, ensuring that logs are organized by user.\n4. Test the configuration with different user scenarios to verify that logs are correctly generated and stored.\n5. Document the new variable usage in the project’s README or wiki for user guidance.\n\nBegin by exploring the existing SecAuditLog implementation and identifying where modifications can be made to introduce variable support.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3133", @@ -63404,6 +65611,7 @@ "node_id": "I_kwDOABQmks6H3-OZ", "title": "Update link on Reference Manual v3 wiki page", "body": "**Describe the bug**\r\n\r\nThe [Base64DecodeExt](https://github.com/owasp-modsecurity/ModSecurity/wiki/Reference-Manual-(v3.x)#user-content-base64DecodeExt) item in the Reference Manual wiki page provide a link that isn't currently online now. I found a version on Internet Wayback machine, and so I want to update this information. But I don't know how to proppose such change, because the Github pages don't support PRs.\r\n\r\n**To Reproduce**\r\n\r\nFollowing the actual link \"blog post on Base64Decoding evasion issues on PHP sites\" point to http://blog.spiderlabs.com/2010/04/impedance-mismatch-and-base64.html url, that isn't online.\r\n\r\n\r\n**Expected behavior**\r\n\r\nThe link can point to https://web.archive.org/web/20110713164920/http://blog.spiderlabs.com/2010/04/impedance-mismatch-and-base64.html for example.", + "summary": "Update the Base64DecodeExt link on the Reference Manual v3 wiki page. \n\n1. Identify the broken link to the \"blog post on Base64Decoding evasion issues on PHP sites\" currently located at `http://blog.spiderlabs.com/2010/04/impedance-mismatch-and-base64.html`.\n2. Replace it with the archived version available at `https://web.archive.org/web/20110713164920/http://blog.spiderlabs.com/2010/04/impedance-mismatch-and-base64.html`.\n3. Propose the change by creating an issue in the GitHub repository since the wiki does not support pull requests.\n4. Clearly state the reason for the update and include the new link in the issue description.\n5. Optionally, tag relevant maintainers or contributors to expedite the review process. 
\n\nStart by drafting an issue with the above details and submit it for review.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3136", @@ -63434,6 +65642,7 @@ "node_id": "I_kwDOABQmks6IbBK6", "title": "Encountering SIGSEGV when parsing multiple rule sets in parallel", "body": "**Describe the bug**\r\n\r\nHello!\r\n\r\nI'm encountering an error when creating multiple rule sets and adding rules to each of them in a multi-threaded environment.\r\n\r\n**Logs and dumps**\r\n\r\nI've traced it back to `yylex` in `yy::seclang_parser::parse()`:\r\n\r\n```\r\n#0 0x00007bb94eea02d8 in yylex(modsecurity::Parser::Driver&) ()\r\n from /usr/lib/libmodsecurity.so.3\r\n#1 0x00007bb94ee78c0e in yy::seclang_parser::parse() ()\r\n from /usr/lib/libmodsecurity.so.3\r\n#2 0x00007bb94eeba170 in modsecurity::Parser::Driver::parse(std::__cxx11::basic_string, std::allocator > const&, std::__cxx11::basic_string, std::allocator > const&) ()\r\n from /usr/lib/libmodsecurity.so.3\r\n#3 0x00007bb94eeba4a6 in modsecurity::Parser::Driver::parseFile(std::__cxx11::basic_string, std::allocator > const&) ()\r\n from /usr/lib/libmodsecurity.so.3\r\n#4 0x00007bb94eed0a17 in modsecurity::RulesSet::loadFromUri(char const*) ()\r\n from /usr/lib/libmodsecurity.so.3\r\n#5 0x00007bb94eed0b68 in msc_rules_add_file ()\r\n from /usr/lib/libmodsecurity.so.3\r\n\r\n...(omitted for brevity)\r\n```\r\n\r\nIs this a known issue? Is `Rules` meant to be initialized and filled only once and in a single-threaded context?", + "summary": "Investigate SIGSEGV occurring during parallel parsing of multiple rule sets. \n\n**Reproduce the issue:** \n1. Create multiple rule sets.\n2. Add rules to each rule set in a multi-threaded environment.\n\n**Identify the source of the error:** \nExamine the stack trace of the error, which indicates that the problem arises in the `yylex` function during the parsing process in `yy::seclang_parser::parse()`.\n\n**Technical details to consider:**\n- Confirm if `Rules` objects are intended to be thread-safe.\n- Review the implementation of `modsecurity::RulesSet::loadFromUri()` and `modsecurity::Parser::Driver::parseFile()` for shared state or non-thread-safe operations.\n- Investigate how `yylex` interacts with stateful data in a multi-threaded context.\n\n**First steps to tackle the problem:**\n1. Check the ModSecurity documentation for thread safety guidelines regarding rules parsing.\n2. Implement mutex locks around rule set creation and modification to ensure thread safety.\n3. Create minimal test cases to reproduce the SIGSEGV in isolation, focusing on rule set loading in parallel.\n4. 
Analyze the memory management practices in the parser to ensure proper allocation and deallocation.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3138", @@ -63462,6 +65671,7 @@ "node_id": "I_kwDOABQmks6I9MNN", "title": "Apache: Short Lingering Close", "body": "Looking through the source code of `mod_reqtimeout`, I saw that it uses the `short-lingering-close` request note to considerably shorten the potential duration of a lingering connection close:\r\nhttps://github.com/apache/httpd/blob/trunk/modules/filters/mod_reqtimeout.c#L329-L335\r\n\r\nI did not see this note being used in ModSecurity, though I'm not sure whether this is even applicable for ModSecurity or whether ModSecurity foregoes the lingering close completely.\r\n", + "summary": "Investigate the application of the `short-lingering-close` request note in ModSecurity. Review the source code of `mod_reqtimeout` in Apache, specifically lines 329-335, to understand how it shortens lingering connection close durations.\n\n1. Analyze the implementation of `mod_reqtimeout` to identify how the `short-lingering-close` note is integrated.\n2. Determine if ModSecurity has a mechanism for handling lingering closes. Check for existing connection timeout settings or related notes in ModSecurity’s codebase.\n3. Evaluate whether implementing the `short-lingering-close` note in ModSecurity would be beneficial. Consider the impact on performance and connection handling.\n4. If applicable, create a patch or modification to introduce the `short-lingering-close` functionality in ModSecurity.\n5. Test the changes in a development environment to verify the expected behavior and performance improvements.\n6. Document the findings and any changes made, and open a pull request for review if enhancements are deemed valid.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3143", @@ -63492,6 +65702,7 @@ "node_id": "I_kwDOABQmks6KxBqY", "title": "Best solution/workaround sanitise modsecurity v3", "body": "** Description of the bug **\r\nOn modsecurity v3 and OWASP CRS 4.x there are a lot of password rule matching and we notice the password printed into the modsecurity audit logs. We also noticed that sanitiseArg is not supported on v3 branch.\r\n\r\nDo you plan to support in the near future this important function?\r\n\r\nDo you aware of a better method than removing the printed part of values ​​via SecAuditLogParts?\r\n\r\nThe found some issue related to the sanitise implementation on v3 branch like:\r\n\r\n#1132 \r\n#1898 ", + "summary": "Address the bug regarding password logging in ModSecurity v3 with OWASP CRS 4.x. \n\n1. Investigate the current implementation of password matching rules that result in passwords being logged in the audit logs.\n2. Review the limitations of the `sanitiseArg` function, which is not supported in the v3 branch.\n3. Explore alternative methods to prevent sensitive data from being logged, aside from modifying `SecAuditLogParts`.\n4. Examine related issues #1132 and #1898 for insights on existing discussions or proposed solutions.\n5. Consider proposing a new feature or enhancement to support sanitization in future ModSecurity releases.\n6. 
Test potential workarounds or custom logging configurations that exclude sensitive information.\n\nInitiate discussions within the community to gather feedback and suggestions for managing this sensitive data logging issue.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3163", @@ -63509,8 +65720,8 @@ 976 ], "labels": [ - 399, - 418 + 418, + 399 ] } }, @@ -63523,6 +65734,7 @@ "node_id": "I_kwDOABQmks6LZ-bf", "title": "@pmFromFile problems", "body": "Hi, I'm running the official owasp/modsecurity-crs:nginx container with latest CRS version 4.3 and libmodsecurity3 version 3.0.12, I added this rule to my REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf file:\r\n\r\n```\r\nSecRule REQUEST_FILENAME \"!@pmFromFile webapp_endpoints.data\" \\\r\n \"id:900998,\\\r\n phase:2,\\\r\n block,\\\r\n capture,\\\r\n t:none,\\\r\n severity:'CRITICAL',\\\r\n setvar:'tx.inbound_anomaly_score_pl1=+%{tx.critical_anomaly_score}'\"\r\n```\r\n\r\nthe file webapp_endpoints.data contains this content:\r\n```\r\n/index.php\r\n/index2.php\r\n/test.php\r\n```\r\n\r\nExpected result:\r\nall 3 pages should load fine, but only test.php does load for some reason. I think it may be a bug because the documentation for `@pmFromFile` says that you can use your own boundaries in phrases, like /1.2.3.4/, but I tried that and it doesn't work. Can you reproduce or tell me if it's a modsecurity bug or i'm just doing something wrong?", + "summary": "Investigate the issue with the `@pmFromFile` directive in the ModSecurity CRS. \n\n1. **Verify the Rule Implementation**: Confirm that the rule added to `REQUEST-900-EXCLUSION-RULES-BEFORE-CRS.conf` is correctly formatted and placed. Ensure there are no syntax errors or conflicting rules that may affect its execution.\n\n2. **Inspect the Data File**: Check the `webapp_endpoints.data` file for proper formatting. Ensure it is accessible and correctly loaded by ModSecurity. Each entry should be properly listed without extraneous characters or spaces.\n\n3. **Test Regular Expression Matching**: Validate the behavior of `@pmFromFile` with simpler test cases. Create a small data file with easier strings (e.g., `/test.php`, `/index.php`) and check if the issue persists. \n\n4. **Review ModSecurity Logs**: Enable detailed logging for ModSecurity to capture any anomalies during rule processing. Look for log entries related to the rule ID `900998` to identify why `/index.php` and `/index2.php` are not being matched.\n\n5. **Consult Documentation**: Double-check the ModSecurity documentation regarding `@pmFromFile` and ensure the format and usage align with the latest standards.\n\n6. **Reproduce the Issue**: Set up a minimal test environment replicating the issue. Use the same container and configuration to determine if the problem can be reproduced consistently.\n\n7. 
**File an Issue**: If the problem appears to be a bug after thorough testing, provide detailed findings including your configuration, data file, and logs in a new GitHub issue for further assistance from the community or maintainers.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3165", @@ -63551,6 +65763,7 @@ "node_id": "I_kwDOABQmks6Lmspa", "title": "Assistance Required with ModSecurity Rule Compatibility for OpenLiteSpeed", "body": "I recently came across the [following ModSecurity rule](https://stackoverflow.com/questions/53620557/modsecurity-apache-how-to-limit-access-rate-by-header) intended to limit client hits by user agent:\r\n\r\n\r\n```# Limit client hits by user agent\r\nSecRule REQUEST_HEADERS:User-Agent \"@pm facebookexternalhit\" \\\r\n \"id:400009,phase:2,nolog,pass,setvar:global.ratelimit_facebookexternalhit=+1,expirevar:global.ratelimit_facebookexternalhit=3\"\r\nSecRule GLOBAL:RATELIMIT_FACEBOOKEXTERNALHIT \"@gt 1\" \\\r\n \"chain,id:4000010,phase:2,pause:300,deny,status:429,setenv:RATELIMITED,log,msg:'RATELIMITED BOT'\"\r\n SecRule REQUEST_HEADERS:User-Agent \"@pm facebookexternalhit\"\r\nHeader always set Retry-After \"3\" env=RATELIMITED\r\nErrorDocument 429 \"Too Many Requests\"\r\n```\r\n\r\nUnfortunately, this rule does not seem to work with OpenLiteSpeed. Could you please help me rewrite this ModSecurity rule to make it compatible with OpenLiteSpeed?\r\n\r\nThank you for your assistance.", + "summary": "Rewrite ModSecurity rule for OpenLiteSpeed compatibility. \n\n1. Identify the purpose of the original ModSecurity rule, which is to limit client hits by user agent, specifically for \"facebookexternalhit\".\n2. Adapt the syntax and logic to fit OpenLiteSpeed's configuration and rule set.\n\nPossible first steps:\n- Review OpenLiteSpeed documentation on security rules and rate limiting.\n- Implement a basic request counter for the user agent using OpenLiteSpeed's internal variables or headers.\n- Set up a custom response for when the limit is exceeded (HTTP status 429).\n- Test the new rule using various user agents to ensure it functions correctly.\n\nUtilize the following example structure to create the new rule:\n\n```apache\n# OpenLiteSpeed pseudo-code\nSecRule REQUEST_HEADERS:User-Agent \"@pm facebookexternalhit\" {\n set global.ratelimit_facebookexternalhit = global.ratelimit_facebookexternalhit + 1\n if (global.ratelimit_facebookexternalhit > 1) {\n set response.status = 429\n set response.body = \"Too Many Requests\"\n set response.headers.Retry-After = \"3\"\n exit\n }\n expire global.ratelimit_facebookexternalhit = 3\n}\n```\n\nAdjust according to OpenLiteSpeed capabilities and test thoroughly.", "state": "open", "state_reason": "", "url": "https://github.com/owasp-modsecurity/ModSecurity/issues/3169", @@ -63579,6 +65792,7 @@ "node_id": "I_kwDOABQmks6L7zrw", "title": "Problem about proxy action", "body": "**Describe the bug**\r\n\r\nI find a problem about proxy action.\r\nIf access a specified webpage file, such as \"http://a.com/a/index.html\", it can be successfully forwarded.\r\nIf access a directory, such as \"http://a.com/a/\", the error_log can log the url will be forwarded, but actually it cannot be forwarded.\r\n\r\n\r\n**Logs and dumps**\r\n\r\n[Wed Jun 12 11:04:12.784758 2024] [security2:error] [pid 97647:tid 139691269490432] [client 10.16.18.12:49131] [client 10.16.18.12] ModSecurity: Access denied using proxy to (phase 2) http://www.test1.com/a/index.html. detected XSS using libinjection. 
[file \"/www/server/apache/conf/modsecurity/rules/REQUEST-941-APPLICATION-ATTACK-XSS.conf\"] [line \"100\"] [id \"941100\"] [msg \"XSS Attack Detected via libinjection\"] [data \"Matched Data: XSS data found within ARGS:id: HTTP/1.1\", host: \"localhost\"\r\n\r\nBut in Coraza, none of the variables shows its value:\r\n> 2023/07/11 12:53:56 [DEBUG] Parsing directive line=\"SecAction \\\"id:980170,phase:5,pass,t:none,noauditlog,msg:'Anomaly Scores: (Inbound Scores: blocking=`%{tx.blocking_inbound_anomaly_score}`, detection=%{tx.detection_inbound_anomaly_score}, per_pl=%{tx.inbound_anomaly_score_pl1}-%{tx.inbound_anomaly_score_pl2}-%{tx.inbound_anomaly_score_pl3}-%{tx.inbound_anomaly_score_pl4}, threshold=%{tx.inbound_anomaly_score_threshold}) - (Outbound Scores: blocking=%{tx.blocking_outbound_anomaly_score}, detection=%{tx.detection_outbound_anomaly_score}, per_pl=%{tx.outbound_anomaly_score_pl1}-%{tx.outbound_anomaly_score_pl2}-%{tx.outbound_anomaly_score_pl3}-%{tx.outbound_anomaly_score_pl4}, threshold=%{tx.outbound_anomaly_score_threshold}) - (SQLI=%{tx.sql_injection_score}, XSS=%{tx.xss_score}, RFI=%{tx.rfi_score}, LFI=%{tx.lfi_score}, RCE=%{tx.rce_score}, PHPI=%{tx.php_injection_score}, HTTP=%{tx.http_violation_score}, SESS=%{tx.session_fixation_score}, COMBINED_SCORE=%{tx.anomaly_score})',tag:'reporting',ver:'OWASP_CRS/4.0.0-rc1'\\\"\"\r\n\r\nLog configurations:\r\n```\r\nSecDebugLogLevel 9\r\nSecDebugLog \"./debug.log\"\r\nSecAuditLog \"./audit.log\"\r\nSecAuditEngine RelevantOnly\r\nSecAuditEngine On\r\nSecAuditLogParts ABCHIJKZ\r\nSecAuditLogFormat JSON\r\nSecAuditLogType Serial\r\n```\r\nI also checked the doc and wrote separate rules to test, and it seems I cannot retrieve any tx value from Coraza in audit/error.log.\r\nIn brief, I can get variables in error/audit from ModSec, whereas both logs cannot get the variables in Coraza\r\n\r\n### Steps to reproduce\r\n\r\nStart ModSec v2.9 / v3 (using docker-crs) and run the following requests. Next, run with Coraza\r\n```sh\r\ncurl -I 'http://localhost/?param=\">' --insecure\r\n```\r\n\r\n### Expected result\r\n\r\nThe log should display the variable `ts.variable`\r\n\r\n### Actual result\r\n\r\nIn the Coraza logs (audit/error), the variable `tx` cannot be interpolated.\r\n", + "summary": "Investigate the inability of Coraza to read and interpolate variables in the audit/error.log compared to ModSecurity (ModSec). \n\n1. **Reproduce the Issue**:\n - Start ModSec v2.9/v3 using docker-crs.\n - Use the provided `curl` command to send a request.\n - Check if the logs display the `tx` variables as expected.\n - Repeat the same process with Coraza to observe the absence of variable interpolation.\n\n2. **Log Configuration Review**:\n - Ensure that the logging configurations in Coraza are set correctly:\n ```\n SecDebugLogLevel 9\n SecDebugLog \"./debug.log\"\n SecAuditLog \"./audit.log\"\n SecAuditEngine RelevantOnly\n SecAuditEngine On\n SecAuditLogParts ABCHIJKZ\n SecAuditLogFormat JSON\n SecAuditLogType Serial\n ```\n\n3. **Documentation Check**:\n - Review Coraza documentation regarding variable handling and logging specifics. Confirm if there are any differences in how Coraza manages variable interpolation compared to ModSec.\n\n4. **Debugging**:\n - Increase the debug log level in Coraza to capture more detailed logs.\n - Analyze the debug log to identify where variable interpolation might be failing.\n\n5. 
**Testing with Custom Rules**:\n - Write simple test rules in Coraza that attempt to access and log various `tx` variables.\n - Check if any of these rules produce the expected output in the audit/error logs.\n\n6. **Community and Support**:\n - Engage with the Coraza community or open an issue on GitHub for assistance, providing detailed logs and steps taken to replicate the issue.\n\nAddress these steps systematically to identify and resolve the variable interpolation issue in Coraza logs.", "state": "open", "state_reason": "", "url": "https://github.com/corazawaf/coraza/issues/847", @@ -69921,6 +72360,7 @@ "node_id": "I_kwDOD-_h5M5sIR-E", "title": "Logging testing", "body": "## Description\n\nOver the last months we have been receiving issues from users that involve log output in different parts of the library and connectors. For example:\n\n- https://github.com/corazawaf/coraza-caddy/issues/87\n- https://github.com/corazawaf/coraza-caddy/issues/75\n- https://github.com/corazawaf/coraza-caddy/issues/32\n- https://github.com/corazawaf/coraza-caddy/issues/20\n- https://github.com/corazawaf/coraza/issues/847\n- https://github.com/corazawaf/coraza/issues/839\n- https://github.com/corazawaf/coraza/issues/760\n\nWhile usually logs are not part of the public API because they are not something people are supposed to consume in a consistent or programatic way, it looks like in a WAF (by popular demand) they are crucial when it comes to operating a system that consumes this library in any way.\n\nHence, because software is built on top of use cases, for coraza specifically, we should be assuring that logs are also something to consider when it comes to production readiness.\n\n### Expected result\n\nThe outcome of this epic should be:\n1. identifying the critial outcomes from logs\n2. add tests that assert correctness around them\n3. Close all the aforementioned issues", + "summary": "Implement logging tests to enhance the reliability of the library's log output. \n\n1. Identify critical log outcomes that users require for effective system operation.\n2. Add unit and integration tests to assert the correctness of these log outputs.\n3. Resolve existing user-reported issues related to logging (e.g., issues #87, #75, #32, #20, #847, #839, #760).\n\n### First Steps:\n- Review existing log-related issues to compile a list of required log outputs.\n- Determine necessary log levels and formats that align with user needs.\n- Develop test cases that cover various logging scenarios, ensuring logs are generated correctly under different conditions.\n- Utilize a logging framework (if not already in use) to facilitate structured logging and testing.\n- Execute tests and validate their effectiveness in capturing the defined log outputs.\n- Document the logging behavior and update the library's API documentation accordingly.", "state": "open", "state_reason": "", "url": "https://github.com/corazawaf/coraza/issues/849", @@ -69936,8 +72376,8 @@ "repository": 1237, "assignees": [], "labels": [ - 461, - 467 + 467, + 461 ] } }, @@ -69950,6 +72390,7 @@ "node_id": "I_kwDOD-_h5M5tibDg", "title": "AuditLog parity with libmodsecurity3", "body": "## Summary\r\n\r\nCorazas current AuditLog format in json has empty values for important fields and is overall less detailed compared to libmodsecurity3. Since the stated goal is to be a \"drop in replacement\" for modsecurity, logging parity would be beneficial. 
\r\n\r\n### Basic example\r\n\r\nBelow are AuditLogs from coraza-spoa ccompared to libmodsecurity3 with the following http request intended to trigger the WAF.\r\ncurl -v http://127.0.0.100/?x\\=/etc/passwd\r\n\r\n\r\nAuditLog generated from coraza-spoa \r\n```\r\n{\r\n \"transaction\": {\r\n \"timestamp\": \"2023/08/05 13:58:06\",\r\n \"unix_timestamp\": 1691236686919190605,\r\n \"id\": \"622c3c2b-073f-43c8-9878-657be5d8a69f\",\r\n \"client_ip\": \"127.0.0.1\",\r\n \"client_port\": 6203,\r\n \"host_ip\": \"127.0.0.100\",\r\n \"host_port\": 80,\r\n \"server_id\": \"\",\r\n \"request\": {\r\n \"method\": \"\",\r\n \"protocol\": \"\",\r\n \"uri\": \"\",\r\n \"http_version\": \"\",\r\n \"headers\": {\r\n \"accept\": [\r\n \"*/*\"\r\n ],\r\n \"host\": [\r\n \"127.0.0.100\"\r\n ],\r\n \"user-agent\": [\r\n \"curl/7.61.1\"\r\n ]\r\n },\r\n \"body\": \"\",\r\n \"files\": null\r\n },\r\n \"response\": {\r\n \"protocol\": \"\",\r\n \"status\": 0,\r\n \"headers\": {},\r\n \"body\": \"\"\r\n },\r\n \"producer\": {\r\n \"connector\": \"\",\r\n \"version\": \"\",\r\n \"server\": \"\",\r\n \"rule_engine\": \"On\",\r\n \"stopwatch\": \"1691236686919190605 1091256; combined=1046168, p1=360327, p2=646155, p3=0, p4=0, p5=39686\",\r\n \"rulesets\": [\r\n \"OWASP_CRS/4.0.0-rc1\"\r\n ]\r\n }\r\n }\r\n}\r\n```\r\n\r\nThere are details in coraza-spoa default error log but this is only partially json and not compareable to the auditlog:\r\n```\r\n{\r\n \"level\": \"error\",\r\n \"ts\": 1691236686.9202244,\r\n \"msg\": \"[client \\\"127.0.0.1\\\"] Coraza: Access denied (phase 2). Inbound Anomaly Score Exceeded (Total Score: 13) [file \\\"/path/to/rules/REQUEST-949-BLOCKING-EVALUATION.conf\\\"] [line \\\"11001\\\"] [id \\\"949110\\\"] [rev \\\"\\\"] [msg \\\"Inbound Anomaly Score Exceeded (Total Score: 13)\\\"] [data \\\"\\\"] [severity \\\"emergency\\\"] [ver \\\"OWASP_CRS/4.0.0-rc1\\\"] [maturity \\\"0\\\"] [accuracy \\\"0\\\"] [tag \\\"anomaly-evaluation\\\"] [hostname \\\"127.0.0.100\\\"] [uri \\\"/?x=/etc/passwd\\\"] [unique_id \\\"622c3c2b-073f-43c8-9878-657be5d8a69f\\\"]\\n\"\r\n}\r\n```\r\n\r\nAuditLog from libmodsecurity3\r\n```\r\n \"transaction\": {\r\n \"client_ip\": \"127.0.0.1\",\r\n \"time_stamp\": \"Thu Aug 3 22:42:14 2023\",\r\n \"server_id\": \"5dab6b80f157\",\r\n \"client_port\": 0,\r\n \"host_ip\": \"127.0.0.100\",\r\n \"host_port\": 80,\r\n \"unique_id\": \"d181c234abcd1be01421\",\r\n \"request\": {\r\n \"method\": \"GET\",\r\n \"http_version\": 1.1,\r\n \"uri\": \"/?x=/etc/passwd\",\r\n \"body\": \"\",\r\n \"headers\": {\r\n \"host\": \"example.tld\",\r\n \"user-agent\": \"curl/7.61.1\",\r\n \"accept\": \"*/*\",\r\n \"x-forwarded-for\": \"127.0.0.1\"\r\n }\r\n },\r\n \"response\": {\r\n \"body\": \"\\n\\n403 Forbidden\\n\\n