Note: I am not good at scripting. I have been using GPT and Copilot to help create these scripts.
I had a working Python script that fetched worklogs across a specific set of projects for a list of users and grouped them by ticket and author.
However, it recently started failing with an error suggesting that author info was no longer being returned by the API call.
I sought help from the AI chatbots to resolve this, and during the troubleshooting I found that some of the API endpoints and calls had been deprecated.
Copilot gave me multiple rounds of updated scripts, but at some point it just started going in a loop: when it sees the error saying "the requested api has been removed", it gives me an updated script, which then produces an "invalid request payload" error.
With the limited scripting knowledge I have, I don't think I can fix this with the chatbots' help. Can anyone point out how this issue can be resolved? Below are the requirement and the current code.
Need:
Fetch all worklogs on tickets in a specific set of projects, for a given list of users only, and export them to an Excel file with two summary sheets (one grouped by author, one grouped by ticket number).
Script:
# Run with: py filename.py
import requests
import pandas as pd
from datetime import datetime
from requests.auth import HTTPBasicAuth
# === CONFIGURATION ===
JIRA_BASE_URL = "https://mysite.atlassian.net"
API_USER_EMAIL = "my email address"
API_TOKEN = "api token"
PROJECT_KEYS = ["project1", "project2"]
START_DATE = "2025-09-01"
END_DATE = "2025-09-10"
ALLOWED_ACCOUNT_IDS = ["accid1", "accid2"] # Replace with actual Jira accountIds
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
EXCEL_FILENAME = f"cloud_worklogs_summary_{timestamp}.xlsx"
# === AUTH HEADERS ===
auth = HTTPBasicAuth(API_USER_EMAIL, API_TOKEN)
headers = {
    "Accept": "application/json",
    "Content-Type": "application/json"
}
# === FUNCTIONS ===
def get_issues(project_key):
    all_issues = []
    start_at = 0
    while True:
        # Use updated JQL without deprecated worklogDate
        jql = f'project = {project_key} AND updated >= "{START_DATE}"'
        url = f"{JIRA_BASE_URL}/rest/api/3/search"
        params = {
            "jql": jql,
            "fields": "key",
            "startAt": start_at,
            "maxResults": 100
        }
        response = requests.get(url, headers=headers, params=params, auth=auth)
        if response.status_code != 200:
            print(f"❌ Failed to fetch issues for project {project_key}: {response.status_code}")
            break
        data = response.json()
        issues = data.get("issues", [])
        all_issues.extend(issues)
        if len(issues) < 100:
            break
        start_at += 100
    return all_issues
def get_worklogs(issue_key):
    url = f"{JIRA_BASE_URL}/rest/api/3/issue/{issue_key}/worklog"
    response = requests.get(url, headers=headers, auth=auth)
    if response.status_code != 200:
        print(f"❌ Failed to fetch worklogs for issue {issue_key}: {response.status_code}")
        return []
    return response.json().get("worklogs", [])
# === COLLECT DATA ===
rows = []
for project in PROJECT_KEYS:
    issues = get_issues(project)
    for issue in issues:
        issue_key = issue["key"]
        worklogs = get_worklogs(issue_key)
        for log in worklogs:
            started = log.get("started", "")[:10]
            author = log.get("author", {})
            account_id = author.get("accountId", "").lower()
            author_name = author.get("displayName", "Unknown")
            if START_DATE <= started <= END_DATE and account_id in [a.lower() for a in ALLOWED_ACCOUNT_IDS]:
                hours = log.get("timeSpentSeconds", 0) / 3600
                rows.append({
                    "Ticket": issue_key,
                    "Author": author_name,
                    "Account ID": account_id,
                    "Hours Logged": round(hours, 2),
                    "Date": started
                })
# === CREATE DATAFRAMES ===
df_logs = pd.DataFrame(rows)
if not df_logs.empty and "Author" in df_logs.columns and "Account ID" in df_logs.columns:
    df_summary = df_logs.groupby(["Author", "Account ID"])["Hours Logged"].sum().reset_index()
    df_summary["Date Range"] = f"{START_DATE} to {END_DATE}"
else:
    print("⚠️ No valid worklog data found. Skipping summary by user.")
    df_summary = pd.DataFrame(columns=["Author", "Account ID", "Hours Logged", "Date Range"])

if not df_logs.empty and "Ticket" in df_logs.columns:
    df_ticket_summary = df_logs.groupby("Ticket")["Hours Logged"].sum().reset_index()
    df_ticket_summary["Date Range"] = f"{START_DATE} to {END_DATE}"
else:
    print("⚠️ No valid ticket data found. Skipping summary by ticket.")
    df_ticket_summary = pd.DataFrame(columns=["Ticket", "Hours Logged", "Date Range"])
# === EXPORT TO EXCEL ===
with pd.ExcelWriter(EXCEL_FILENAME, engine="openpyxl") as writer:
    df_logs.to_excel(writer, sheet_name="Worklogs", index=False)
    df_summary.to_excel(writer, sheet_name="Summary by User", index=False)
    df_ticket_summary.to_excel(writer, sheet_name="Summary by Ticket", index=False)
print(f"\n✅ Excel file with summary saved to: {EXCEL_FILENAME}")
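From what I could piece together from the deprecation notices, the GET /rest/api/3/search call inside get_issues above is the endpoint that has been removed, and its replacement is /rest/api/3/search/jql, which paginates with a nextPageToken instead of startAt. For reference, this is my best guess at how get_issues would look against the new endpoint, reusing the same config and auth from the script above. It is untested, and the payload and response field names (jql, fields, maxResults, nextPageToken) are just my reading of the docs, so please correct me if they are wrong:

def get_issues_v2(project_key):
    # Best-guess rewrite for the newer /search/jql endpoint (untested)
    all_issues = []
    next_page_token = None
    jql = f'project = {project_key} AND updated >= "{START_DATE}"'
    url = f"{JIRA_BASE_URL}/rest/api/3/search/jql"
    while True:
        # Flat JSON body; pagination is token-based rather than startAt-based
        payload = {"jql": jql, "fields": ["key"], "maxResults": 100}
        if next_page_token:
            payload["nextPageToken"] = next_page_token
        response = requests.post(url, headers=headers, auth=auth, json=payload)
        if response.status_code != 200:
            print(f"❌ Failed to fetch issues for project {project_key}: {response.status_code}")
            break
        data = response.json()
        all_issues.extend(data.get("issues", []))
        # Assumption: no nextPageToken in the response means this was the last page
        next_page_token = data.get("nextPageToken")
        if not next_page_token:
            break
    return all_issues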
-----------------------------------------------------------------------------------------
This is a smaller version of the script, for debugging, that only fetches the issues, but it also fails, with an "invalid request payload" error.
# === AUTH HEADERS ===
auth = HTTPBasicAuth(API_USER_EMAIL, API_TOKEN)
headers = {
    "Accept": "application/json",
    "Content-Type": "application/json"
}

# === FETCH ISSUES ===
for project_key in PROJECT_KEYS:
    jql = f'project = {project_key} AND updated >= "{START_DATE}"'
    url = f"{JIRA_BASE_URL}/rest/api/3/search/jql"
    payload = {
        "queries": [
            {
                "query": jql,
                "pagination": {
                    "startAt": 0,
                    "maxResults": 50
                },
                "fields": ["key", "summary"]
            }
        ]
    }
    response = requests.post(url, headers=headers, auth=auth, json=payload)
    print(f"\n🔍 Project: {project_key} | Status: {response.status_code}")
    if response.status_code == 200:
        data = response.json()
        for issue in data.get("results", [])[0].get("issues", []):
            print(f"🆔 {issue['key']} | Summary: {issue['fields'].get('summary')}")
    else:
        print(response.text)
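In case it helps with diagnosing the "invalid request payload" error: my current suspicion is that the "queries" array Copilot generated is not a body shape /rest/api/3/search/jql accepts, and that the endpoint wants a flat payload instead. This is the variant I was going to try next (again untested, and the flat jql/maxResults/fields body is an assumption on my part):

# Assumed flat payload for POST /rest/api/3/search/jql (untested)
for project_key in PROJECT_KEYS:
    jql = f'project = {project_key} AND updated >= "{START_DATE}"'
    url = f"{JIRA_BASE_URL}/rest/api/3/search/jql"
    payload = {
        "jql": jql,
        "maxResults": 50,
        "fields": ["key", "summary"]
    }
    response = requests.post(url, headers=headers, auth=auth, json=payload)
    print(f"\n🔍 Project: {project_key} | Status: {response.status_code}")
    if response.status_code == 200:
        # Assumption: issues come back at the top level, not under "results"
        for issue in response.json().get("issues", []):
            print(f"🆔 {issue['key']} | Summary: {issue['fields'].get('summary')}")
    else:
        print(response.text)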