Filter discarded conversations via api
beastoin committed Jan 14, 2025
1 parent cb233c9 commit 38b0e1a
Showing 6 changed files with 45 additions and 23 deletions.
17 changes: 13 additions & 4 deletions app/lib/backend/http/api/conversations.dart
@@ -37,10 +37,13 @@ Future<CreateConversationResponse?> processInProgressConversation() async {
 }
 
 Future<List<ServerConversation>> getConversations(
-    {int limit = 50, int offset = 0, List<ConversationStatus> statuses = const []}) async {
+    {int limit = 50,
+    int offset = 0,
+    List<ConversationStatus> statuses = const [],
+    bool includeDiscarded = true}) async {
   var response = await makeApiCall(
       url:
-          '${Env.apiBaseUrl}v1/memories?limit=$limit&offset=$offset&statuses=${statuses.map((val) => val.toString().split(".").last).join(",")}',
+          '${Env.apiBaseUrl}v1/memories?include_discarded=$includeDiscarded&limit=$limit&offset=$offset&statuses=${statuses.map((val) => val.toString().split(".").last).join(",")}',
       headers: {},
       method: 'GET',
       body: '');
@@ -353,12 +356,18 @@ Future<SyncLocalFilesResponse> syncLocalFiles(List<File> files) async {
   }
 }
 
-Future<(List<ServerConversation>, int, int)> searchConversationsServer(String query, [int? page, int? limit]) async {
+Future<(List<ServerConversation>, int, int)> searchConversationsServer(
+  String query, {
+  int? page,
+  int? limit,
+  bool includeDiscarded = false,
+}) async {
   var response = await makeApiCall(
     url: '${Env.apiBaseUrl}v1/memories/search',
     headers: {},
     method: 'POST',
-    body: jsonEncode({'query': query, 'page': page ?? 1, 'per_page': limit ?? 10}),
+    body:
+        jsonEncode({'query': query, 'page': page ?? 1, 'per_page': limit ?? 10, 'include_discarded': includeDiscarded}),
   );
   if (response == null) return (<ServerConversation>[], 0, 0);
   if (response.statusCode == 200) {
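At the HTTP level, the two updated Dart helpers simply forward the new flag to the backend. Below is a rough sketch of the equivalent requests using Python's requests library; the base URL and auth header are placeholders, not values taken from this commit:

import requests

API_BASE = 'https://api.example.com/'             # placeholder for Env.apiBaseUrl
HEADERS = {'Authorization': 'Bearer <id-token>'}  # placeholder auth header

# getConversations(...) now appends include_discarded to the query string (defaults to true).
listing = requests.get(
    API_BASE + 'v1/memories',
    params={'include_discarded': 'false', 'limit': 50, 'offset': 0, 'statuses': ''},
    headers=HEADERS,
)

# searchConversationsServer(...) now carries the flag in the JSON body (defaults to false).
search = requests.post(
    API_BASE + 'v1/memories/search',
    json={'query': 'meeting', 'page': 1, 'per_page': 10, 'include_discarded': False},
    headers=HEADERS,
)
print(listing.status_code, search.status_code)
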
10 changes: 6 additions & 4 deletions app/lib/providers/conversation_provider.dart
@@ -88,7 +88,7 @@ class ConversationProvider extends ChangeNotifier implements IWalServiceListener
 
     setIsFetchingConversations(true);
     previousQuery = query;
-    var (convos, current, total) = await searchConversationsServer(query);
+    var (convos, current, total) = await searchConversationsServer(query, includeDiscarded: showDiscardedConversations);
     convos.sort((a, b) => b.createdAt.compareTo(a.createdAt));
     searchedConversations = convos;
     currentSearchPage = current;
@@ -106,7 +106,8 @@ class ConversationProvider extends ChangeNotifier implements IWalServiceListener
     setLoadingConversations(true);
     var (newConvos, current, total) = await searchConversationsServer(
       previousQuery,
-      currentSearchPage + 1,
+      page: currentSearchPage + 1,
+      includeDiscarded: showDiscardedConversations,
     );
     searchedConversations.addAll(newConvos);
     searchedConversations.sort((a, b) => b.createdAt.compareTo(a.createdAt));
@@ -251,7 +252,7 @@ class ConversationProvider extends ChangeNotifier implements IWalServiceListener
 
   Future getConversationsFromServer() async {
     setLoadingConversations(true);
-    var mem = await getConversations();
+    var mem = await getConversations(includeDiscarded: showDiscardedConversations);
     conversations = mem;
     conversations.sort((a, b) => b.createdAt.compareTo(a.createdAt));
     setLoadingConversations(false);
@@ -281,7 +282,8 @@ class ConversationProvider extends ChangeNotifier implements IWalServiceListener
     if (conversations.length % 50 != 0) return;
     if (isLoadingConversations) return;
     setLoadingConversations(true);
-    var newConversations = await getConversations(offset: conversations.length);
+    var newConversations =
+        await getConversations(offset: conversations.length, includeDiscarded: showDiscardedConversations);
     conversations.addAll(newConversations);
     conversations.sort((a, b) => b.createdAt.compareTo(a.createdAt));
     groupConversationsByDate();
1 change: 1 addition & 0 deletions backend/models/memory.py
@@ -274,3 +274,4 @@ class SearchRequest(BaseModel):
     query: str
     page: Optional[int] = 1
     per_page: Optional[int] = 10
+    include_discarded: Optional[bool] = True
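A minimal sketch of how the extended request model behaves, assuming SearchRequest contains exactly the four fields visible in this hunk:

from typing import Optional
from pydantic import BaseModel

class SearchRequest(BaseModel):
    query: str
    page: Optional[int] = 1
    per_page: Optional[int] = 10
    include_discarded: Optional[bool] = True

# Clients that omit the field keep the previous behaviour (discarded memories
# included); clients can now opt out explicitly.
print(SearchRequest(query='standup').include_discarded)                           # True
print(SearchRequest(query='standup', include_discarded=False).include_discarded)  # False
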
26 changes: 13 additions & 13 deletions backend/routers/memories.py
@@ -69,9 +69,9 @@ def reprocess_memory(
 
 
 @router.get('/v1/memories', response_model=List[Memory], tags=['memories'])
-def get_memories(limit: int = 100, offset: int = 0, statuses: str = "", uid: str = Depends(auth.get_current_user_uid)):
+def get_memories(limit: int = 100, offset: int = 0, statuses: str = "", include_discarded: bool = True, uid: str = Depends(auth.get_current_user_uid)):
     print('get_memories', uid, limit, offset, statuses)
-    return memories_db.get_memories(uid, limit, offset, include_discarded=True,
+    return memories_db.get_memories(uid, limit, offset, include_discarded=include_discarded,
                                     statuses=statuses.split(",") if len(statuses) > 0 else [])
 
 
@@ -202,16 +202,16 @@ def set_assignee_memory_segment(
         raise HTTPException(status_code=400, detail="Invalid assign type")
 
     memories_db.update_memory_segments(uid, memory_id, [segment.dict() for segment in memory.transcript_segments])
-    segment_words = len(memory.transcript_segments[segment_idx].text.split(' '))
-
-    # TODO: can do this async
-    if use_for_speech_training and not is_unassigning and segment_words > 5: # some decent sample at least
-        person_id = value if assign_type == 'person_id' else None
-        expand_speech_profile(memory_id, uid, segment_idx, assign_type, person_id)
-    else:
-        path = f'{memory_id}_segment_{segment_idx}.wav'
-        delete_additional_profile_audio(uid, path)
-        delete_speech_sample_for_people(uid, path)
+    # thinh's note: disabled for now
+    # segment_words = len(memory.transcript_segments[segment_idx].text.split(' '))
+    # # TODO: can do this async
+    # if use_for_speech_training and not is_unassigning and segment_words > 5: # some decent sample at least
+    #     person_id = value if assign_type == 'person_id' else None
+    #     expand_speech_profile(memory_id, uid, segment_idx, assign_type, person_id)
+    # else:
+    #     path = f'{memory_id}_segment_{segment_idx}.wav'
+    #     delete_additional_profile_audio(uid, path)
+    #     delete_speech_sample_for_people(uid, path)
 
     return memory
 
@@ -343,4 +343,4 @@ def get_public_memories(offset: int = 0, limit: int = 1000):
 @router.post("/v1/memories/search", response_model=dict, tags=['memories'])
 def search_memories_endpoint(search_request: SearchRequest, uid: str = Depends(auth.get_current_user_uid)):
     return search_memories(query=search_request.query, page=search_request.page,
-                           per_page=search_request.per_page, uid=uid)
+                           per_page=search_request.per_page, uid=uid, include_discarded=search_request.include_discarded)
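On the listing endpoint the flag arrives as an ordinary FastAPI query parameter. A self-contained sketch of that parsing behaviour, with the auth dependency and database call replaced by stand-ins (this is not the real handler):

from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()

@app.get('/v1/memories')
def get_memories(limit: int = 100, offset: int = 0, statuses: str = '', include_discarded: bool = True):
    # Stand-in for memories_db.get_memories: echo how the flag was parsed.
    return {'include_discarded': include_discarded, 'limit': limit, 'offset': offset}

client = TestClient(app)
print(client.get('/v1/memories').json())                          # flag defaults to True
print(client.get('/v1/memories?include_discarded=false').json())  # explicit opt-out, as the app now sends
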
6 changes: 5 additions & 1 deletion backend/typesense/memories.schema
@@ -25,8 +25,12 @@
     {
       "name": "userId",
       "type": "string"
+    },
+    {
+      "name": "discarded",
+      "type": "bool"
     }
   ],
   "default_sorting_field": "created_at",
   "enable_nested_fields": true
-}
+}
8 changes: 7 additions & 1 deletion backend/utils/memories/search.py
@@ -21,12 +21,18 @@ def search_memories(
     query: str,
     page: int = 1,
     per_page: int = 10,
+    include_discarded: bool = True,
 ) -> Dict:
     try:
+
+        filter_by = f'userId:={uid} && deleted:=false'
+        if not include_discarded:
+            filter_by = filter_by + ' && discarded:=false'
+
         search_parameters = {
             'q': query,
             'query_by': 'structured, transcript_segments',
-            'filter_by': 'userId := ' + uid,
+            'filter_by': filter_by,
             'sort_by': 'created_at:desc',
             'per_page': per_page,
             'page': page,
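The new filter string maps directly onto a Typesense filter_by expression. A sketch using the official typesense Python client; the connection details are placeholders and the collection name 'memories' is assumed from the schema filename, not confirmed by this commit:

import typesense

# Placeholder connection settings, not part of this commit.
client = typesense.Client({
    'nodes': [{'host': 'localhost', 'port': '8108', 'protocol': 'http'}],
    'api_key': 'xyz',
    'connection_timeout_seconds': 2,
})

def build_filter(uid: str, include_discarded: bool) -> str:
    # Mirrors the new logic: always scope to the user and exclude deleted
    # documents, and drop discarded ones only when the caller opts out.
    filter_by = f'userId:={uid} && deleted:=false'
    if not include_discarded:
        filter_by = filter_by + ' && discarded:=false'
    return filter_by

results = client.collections['memories'].documents.search({
    'q': 'standup',
    'query_by': 'structured, transcript_segments',
    'filter_by': build_filter('some-user-id', include_discarded=False),
    'sort_by': 'created_at:desc',
    'per_page': 10,
    'page': 1,
})
print(results.get('found'))
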
