Notification #1145

Open
wants to merge 10 commits into base: back-branch-master
2 changes: 1 addition & 1 deletion backend/dataset/admin.py
@@ -1,4 +1,4 @@
import resource
# import resource
from django.contrib import admin
from import_export.admin import ImportExportActionModelAdmin
from .resources import *
15 changes: 15 additions & 0 deletions backend/dataset/serializers.py
@@ -11,6 +11,21 @@ class DatasetInstanceSerializer(serializers.ModelSerializer):
class Meta:
model = DatasetInstance
fields = "__all__"


class DatasetInstanceSerializerOptimized(serializers.ModelSerializer):
class Meta:
model = DatasetInstance
fields = [
"instance_id",
"parent_instance_id",
"instance_name",
"instance_description",
"dataset_type",
"public_to_managers",
"organisation_id"
]
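For context, a minimal sketch of what the slimmer serializer changes (illustrative only; run in a Django shell and assume at least one DatasetInstance row exists): the optimized class emits just the seven whitelisted fields, a subset of the full serializer's output.

# Illustrative sketch, not part of the diff (python manage.py shell):
instance = DatasetInstance.objects.first()
slim = DatasetInstanceSerializerOptimized(instance).data
full = DatasetInstanceSerializer(instance).data
assert set(slim) <= set(full)  # only the whitelisted fields are present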



class DatasetInstanceUploadSerializer(serializers.Serializer):
119 changes: 113 additions & 6 deletions backend/dataset/views.py
@@ -38,6 +38,11 @@
from . import resources
from .models import *
from .serializers import *
from django.db.models import Prefetch, Q, F
from utils.dataset_utils import get_batch_dataset_upload_status
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework import status
from .tasks import upload_data_to_data_instance, deduplicate_dataset_instance_items
import dataset
from tasks.models import (
@@ -244,6 +249,8 @@ def retrieve(self, request, pk, *args, **kwargs):
),
],
)


def list(self, request, *args, **kwargs):
# Org Owners and superusers see all datasets
if request.user.is_superuser:
@@ -257,7 +264,6 @@ def list(self, request, *args, **kwargs):
queryset = DatasetInstance.objects.filter(
organisation_id=request.user.organization
).filter(Q(public_to_managers=True) | Q(users__id=request.user.id))

if "dataset_visibility" in request.query_params:
dataset_visibility = request.query_params["dataset_visibility"]
if dataset_visibility == "all_public_datasets":
@@ -267,18 +273,15 @@
queryset = queryset.filter(public_to_managers=True)
elif dataset_visibility == "my_datasets":
queryset = queryset.filter(users__id=request.user.id)

# Filter the queryset based on the query params
if "dataset_type" in dict(request.query_params):
queryset = queryset.filter(
dataset_type__exact=request.query_params["dataset_type"]
)

# Serialize the distinct items and sort by instance ID
serializer = DatasetInstanceSerializer(
queryset.distinct().order_by("instance_id"), many=True
)

# Add status fields to the serializer data
for dataset_instance in serializer.data:
# Get the task statuses for the dataset instance
@@ -288,14 +291,118 @@ def list(self, request, *args, **kwargs):
dataset_instance_time,
dataset_instance_result,
) = get_dataset_upload_status(dataset_instance["instance_id"])

# Add the task status and time to the dataset instance response
dataset_instance["last_upload_status"] = dataset_instance_status
dataset_instance["last_upload_date"] = dataset_instance_date
dataset_instance["last_upload_time"] = dataset_instance_time
dataset_instance["last_upload_result"] = dataset_instance_result

return Response(serializer.data)


# def get_queryset(self):
@action(detail=False, methods=["get"], url_path="optimized-list")
def list_optimized(self, request):
# Base queryset determination based on user role
queryset = DatasetInstance.objects.all()
if request.user.is_superuser:
pass  # superusers see every dataset instance
elif request.user.role == User.ORGANIZATION_OWNER:
queryset = queryset.filter(
organisation_id=request.user.organization
)
else:
queryset = queryset.filter(
organisation_id=request.user.organization
).filter(Q(public_to_managers=True) | Q(users__id=request.user.id))
# Apply filters using request query parameters
dataset_visibility = request.query_params.get("dataset_visibility")
if dataset_visibility == "all_public_datasets":
queryset = queryset.filter(public_to_managers=True)
elif dataset_visibility == "my_datasets":
queryset = queryset.filter(users__id=request.user.id)
dataset_type = request.query_params.get("dataset_type")
if dataset_type:
queryset = queryset.filter(dataset_type__exact=dataset_type)
archived_datasets = request.query_params.get("archived_datasets")
if archived_datasets is not None:
queryset = queryset.filter(is_archived=(archived_datasets == "true"))
# Sort by criteria
sort_type = request.query_params.get("sort_type")
if sort_type == "recently_updated":
queryset = queryset.order_by(F("last_updated").desc(nulls_last=True))
else:
queryset = queryset.order_by("instance_id")
# Optimize related field loading
queryset = queryset.prefetch_related(
Prefetch("users"), # Prefetch the related users
)
# Serialize the data
serializer = DatasetInstanceSerializerOptimized(queryset.distinct(), many=True)
# Batch process upload status for all datasets
# instance_ids = [instance["instance_id"] for instance in serializer.data]
# status_data = get_batch_dataset_upload_status(instance_ids)
# # Annotate upload status in the response
# for dataset_instance in serializer.data:
# instance_id = dataset_instance["instance_id"]
# if instance_id in status_data:
# dataset_instance.update(status_data[instance_id])
return Response(serializer.data, status=status.HTTP_200_OK)
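If the batch annotation above were re-enabled, a minimal sketch using the get_batch_dataset_upload_status stub added in backend/utils/dataset_utils.py (which returns a dict keyed by instance_id) could look like this; it is one possible wiring, not the PR's final code:

# Sketch only: annotate each serialized dataset with its last-upload fields
# in one batched lookup instead of one call per instance.
data = serializer.data
instance_ids = [row["instance_id"] for row in data]
status_map = get_batch_dataset_upload_status(instance_ids)
for row in data:
    row.update(status_map.get(row["instance_id"], {}))
return Response(data, status=status.HTTP_200_OK)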



# def list_optimized(self, request):
# # Base queryset determination based on user role
# if request.user.is_superuser:
# queryset = DatasetInstance.objects.all()
# elif request.user.role == User.ORGANIZATION_OWNER:
# queryset = DatasetInstance.objects.filter(
# organisation_id=request.user.organization
# )
# else:
# queryset = DatasetInstance.objects.filter(
# organisation_id=request.user.organization
# ).filter(Q(public_to_managers=True) | Q(users__id=request.user.id))
# # Apply optional filters based on query parameters
# if "dataset_visibility" in request.query_params:
# dataset_visibility = request.query_params["dataset_visibility"]
# if dataset_visibility == "all_public_datasets":
# if (
# request.user.role == User.WORKSPACE_MANAGER
# and not request.user.is_superuser
# ):
# queryset = queryset.filter(public_to_managers=True)
# elif dataset_visibility == "my_datasets":
# queryset = queryset.filter(users__id=request.user.id)
# if "dataset_type" in request.query_params:
# queryset = queryset.filter(
# dataset_type__exact=request.query_params["dataset_type"]
# )
# if "archived_datasets" in request.query_params:
# archived_datasets = request.query_params["archived_datasets"] == "true"
# queryset = queryset.filter(is_archived=archived_datasets)
# # Add sorting by custom criteria
# if (
# "sort_type" in request.query_params
# and request.query_params["sort_type"] == "recently_updated"
# ):
# queryset = queryset.order_by(F("last_updated").desc(nulls_last=True))
# else:
# queryset = queryset.order_by(F("instance_id").asc())
# # Serialize the distinct items using the optimized serializer
# serializer = DatasetInstanceSerializerOptimized(queryset.distinct(), many=True)
# # Add additional status fields to each dataset instance
# for dataset_instance in serializer.data:
# (
# dataset_instance_status,
# dataset_instance_date,
# dataset_instance_time,
# dataset_instance_result,
# ) = get_dataset_upload_status(dataset_instance["instance_id"])
# dataset_instance["last_upload_status"] = dataset_instance_status
# dataset_instance["last_upload_date"] = dataset_instance_date
# dataset_instance["last_upload_time"] = dataset_instance_time
# dataset_instance["last_upload_result"] = dataset_instance_result
# return Response(serializer.data, status=status.HTTP_200_OK)



@is_organization_owner
@action(methods=["GET"], detail=True, name="Download Dataset in CSV format")
3 changes: 2 additions & 1 deletion backend/notifications/models.py
@@ -38,10 +38,11 @@ class Notification(models.Model):
metadata_json = models.JSONField(
blank=True, null=True, help_text="Additional metadata in JSON format."
)
# This field maps a user ID to whether that user has seen the notification; if a user has not seen it, their ID is absent from the JSON.
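# Example (illustrative): seen_json = {"12": True} means user 12 has seen it.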
seen_json = models.JSONField(
blank=True,
null=True,
help_text="JSON field to store information about whether the notification has been seen.",
help_text="JSON field mapping each user ID to whether that user has seen the notification.",
)

class Meta:
1 change: 1 addition & 0 deletions backend/notifications/urls.py
@@ -6,4 +6,5 @@
path("", viewNotifications, name="view_notification"),
path("create", createNotification, name="create_notification"),
path("changeState", mark_seen, name="mark_seen"),
path("unread/", allunreadNotifications, name="unread-notifications"),
]
51 changes: 50 additions & 1 deletion backend/notifications/views.py
@@ -1,5 +1,4 @@
import json

from django.db.models import Q
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
@@ -11,6 +10,13 @@
from notifications.models import Notification
from notifications.tasks import create_notification_handler
from notifications.serializers import NotificationSerializer
import json
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets
from .models import Notification
from .serializers import NotificationSerializer


NO_NOTIFICATION_MESSAGE = {"message": "No notifications found"}
FETCH_NOTIFICATION_ERROR = {"message": "Cannot fetch notifications"}
@@ -96,3 +102,46 @@ def mark_seen(request):
notif.seen_json = s_json
notif.save()
return Response(NOTIFICATION_CHANGED_STATE, status=status.HTTP_200_OK)



# Unread notifications
class NotificationViewSet(viewsets.ModelViewSet):
queryset = Notification.objects.all()
serializer_class = NotificationSerializer

@action(detail=False, methods=['get'])
def unseen_notifications(self, request):
unseen_notifications = self.get_queryset().filter(seen_json__isnull=True)
serializer = self.get_serializer(unseen_notifications, many=True)
return Response(serializer.data)
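This viewset is not registered in notifications/urls.py in this diff; a hypothetical routing sketch (names assumed, not part of the PR) would expose the action at /notifications/unseen_notifications/:

# Hypothetical wiring sketch -- not included in this PR.
from rest_framework.routers import DefaultRouter
from notifications.views import NotificationViewSet

router = DefaultRouter()
router.register(r"notifications", NotificationViewSet, basename="notification")
# urlpatterns += router.urls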


@swagger_auto_schema(
method="get",
manual_parameters=[],
responses={200: "Unread notifications fetched", 400: "Error while fetching unread notifications"},
)
@api_view(["GET"])
def allunreadNotifications(request):
"""Fetch all unseen notifications for the authenticated user and return the total count."""
try:
user = request.user # Get the authenticated user

# Fetch notifications where seen_json is empty or does not contain the user's ID marked as seen
notifications = Notification.objects.filter(
reciever_user_id=user.id
).exclude(Q(seen_json__contains={str(user.id): True})).order_by("-created_at")

# Get total count
total_count = notifications.count()

# Serialize the notifications
serialized_notifications = NotificationSerializer(notifications, many=True).data

except Exception as e:
print(f"Error fetching notifications: {str(e)}") # Print error in terminal
return Response({"error": "Error fetching notifications", "details": str(e)}, status=status.HTTP_400_BAD_REQUEST)

return Response({"notifications": serialized_notifications, "total_count": total_count}, status=status.HTTP_200_OK)
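A minimal client sketch for the new unread/ route; the host, URL prefix, and auth scheme below are assumptions, while the response keys (notifications, total_count) match the view above:

# Illustrative request; adjust host, URL prefix, and authentication to your deployment.
import requests

resp = requests.get(
    "http://127.0.0.1:8000/notifications/unread/",
    headers={"Authorization": "Token <your-token>"},
)
payload = resp.json()
print(payload["total_count"], "unread notification(s)")
for notif in payload["notifications"]:
    print(notif)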

7 changes: 4 additions & 3 deletions backend/organizations/serializers.py
@@ -20,9 +20,10 @@ class Meta:


class OrganizationSerializer(serializers.ModelSerializer):
created_by = UserReadSerializer(read_only=True)
# created_by = UserReadSerializer(read_only=True)

class Meta:
model = Organization
fields = ["id", "title", "email_domain_name", "created_by", "created_at"]
read_only_fields = ["id", "created_by", "created_at"]
# fields = ["id", "title", "email_domain_name", "created_by", "created_at"]
fields = ["id", "title"]
# read_only_fields = ["id", "created_by", "created_at"]
1 change: 1 addition & 0 deletions backend/projects/urls.py
@@ -8,4 +8,5 @@

urlpatterns = [
path("", include(router.urls)),
# path('notifications/', include('notifications.urls')),
]
18 changes: 6 additions & 12 deletions backend/projects/views.py
@@ -1443,12 +1443,7 @@ def list(self, request, *args, **kwargs):
400: "Please Login!",
},
)
@action(
detail=False,
methods=["get"],
url_name="list-optimized",
url_path="projects_list/optimized",
)
@action(detail=False, methods=["get"], url_name="list-optimized", url_path="projects_list/optimized")
def list_optimized(self, request):
"""
List all projects with some optimizations.
@@ -2189,6 +2184,9 @@ def create(self, request, *args, **kwargs):
automatic_annotation_creation_mode = request.data.get(
"automatic_annotation_creation_mode"
)
user_id = request.data.get("created_by")
user = User.objects.get(id=user_id) if user_id else None
if user:
request.data["created_by"] = user.id

if project_mode == Collection:
# Create project object
Expand All @@ -2197,6 +2195,7 @@ def create(self, request, *args, **kwargs):
project_id = project_response.data["id"]

proj = Project.objects.get(id=project_id)
if user:
proj.created_by = user
if proj.required_annotators_per_task > 1:
proj.project_stage = REVIEW_STAGE
proj.save()
@@ -2804,12 +2803,7 @@ def assign_new_review_tasks(self, request, pk, *args, **kwargs):
{"message": "Tasks assigned successfully"}, status=status.HTTP_200_OK
)

@action(
detail=True,
methods=["post"],
name="Unassign review tasks",
url_name="unassign_review_tasks",
)
@action(detail=True, methods=["post"], name="Unassign review tasks", url_name="unassign_review_tasks")
@project_is_archived
def unassign_review_tasks(self, request, pk, *args, **kwargs):
"""
1 change: 1 addition & 0 deletions backend/shoonya_backend/settings.py
@@ -42,6 +42,7 @@
"0.0.0.0",
"backend.shoonya.ai4bharat.org",
"backend.shoonya2.ai4bharat.org",
"127.0.0.1",
]

# Application definition
7 changes: 1 addition & 6 deletions backend/users/serializers.py
@@ -105,12 +105,7 @@ class Meta:
"is_active",
]
read_only_fields = [
"id",
"email",
"role",
"organization",
"unverified_email",
"date_joined",
"id","organization",
]


15 changes: 15 additions & 0 deletions backend/utils/dataset_utils.py
@@ -0,0 +1,15 @@
def get_batch_dataset_upload_status(instance_ids):
"""
Batch fetch upload status for a list of dataset instance IDs.
Replace this with actual logic to retrieve status from your database.
"""
# Mock data for testing
status_data = {}
for instance_id in instance_ids:
status_data[instance_id] = {
"last_upload_status": "Completed",
"last_upload_date": "2023-01-01",
"last_upload_time": "12:00:00",
"last_upload_result": "Success",
}
return status_data
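A short usage sketch for the stub (it returns only the mock values shown above):

# Example call against the mock implementation.
if __name__ == "__main__":
    statuses = get_batch_dataset_upload_status([1, 2, 3])
    for instance_id, info in statuses.items():
        print(instance_id, info["last_upload_status"], info["last_upload_result"])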