Skip to content
This repository has been archived by the owner on Mar 1, 2024. It is now read-only.

read all jira issues in chunks of 50 #912

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions llama_hub/jira/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,12 +20,15 @@ You can follow this link for more information regarding Oauth2 -> https://develo

Here's an example of how to use it

Please note that the `max_results` parameter is the maximum number of issues you want to fetch from Jira.
`max_results` is optional and defaults to 50.

```python

from llama_hub.jira import JiraReader

reader = JiraReader(email=email, api_token=api_token, server_url="your-jira-server.com")
documents = reader.load_data(query='project = <your-project>')
documents = reader.load_data(query='project = <your-project>', max_results=500)

```

Expand All @@ -37,6 +40,6 @@ from llama_index import download_loader
JiraReader = download_loader('JiraReader')

reader = JiraReader(email=email, api_token=api_token, server_url="your-jira-server.com")
documents = reader.load_data(query='project = <your-project>')
documents = reader.load_data(query='project = <your-project>', max_results=500)

```
16 changes: 13 additions & 3 deletions llama_hub/jira/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,18 @@ def __init__(
server=f"https://{BasicAuth['server_url']}",
)

def load_data(self, query: str) -> List[Document]:
relevant_issues = self.jira.search_issues(query)
def load_data(self, query: str, max_results: Optional[int] = [50]) -> List[Document]:
relevant_issues = []
start_at = 0

while True:
chunk_issues = self.jira.search_issues(
query, startAt=start_at, maxResults=max_results
)
relevant_issues.extend(chunk_issues)
if len(chunk_issues) < max_results:
break
start_at += max_results

issues = []

Expand Down Expand Up @@ -92,7 +102,7 @@ def load_data(self, query: str) -> List[Document]:

issues.append(
Document(
text=f"{issue.fields.summary} \n {issue.fields.description}",
text=f"{issue.key} {issue.fields.summary} \n {issue.fields.description}",
extra_info={
"id": issue.id,
"title": issue.fields.summary,
Expand Down