This repository has been archived by the owner on Jan 25, 2024. It is now read-only.

Fix issue with exceeding rate limits #1892

Merged: 2 commits, Aug 6, 2023
160 changes: 104 additions & 56 deletions scripts/update_top_ranking_issues/main.py
@@ -1,3 +1,4 @@
+import os
 import sys
 from collections import defaultdict
 from datetime import datetime
@@ -11,7 +12,7 @@
     "design",
     "documentation",
     "enhancement",
-    "panic / crash"
+    "panic / crash",
 ]
 CORE_LABEL_NAMES_SET = set(CORE_LABEL_NAMES_LIST)
 IGNORED_LABEL_NAMES_LIST = [
@@ -38,22 +39,25 @@ def __init__(self, issue):
 
 
 def main():
-    if len(sys.argv) < 2:
-        raise CommandLineArgumentException("A GitHub access token must be supplied")
+    github_access_token = os.getenv("GITHUB_TOKEN")
 
-    dev_mode = False
+    if not github_access_token:
+        raise CommandLineArgumentException(
+            'A GitHub access token must be provided in the env as: "GITHUB_TOKEN"'
+        )
+
+    prod_mode = False
 
-    if len(sys.argv) == 3:
-        dev_mode_text = "dev_mode"
+    if len(sys.argv) == 2:
+        prod_mode_text = "prod_mode"
 
-        if sys.argv[2] == dev_mode_text:
-            dev_mode = True
+        if sys.argv[1] == prod_mode_text:
+            prod_mode = True
         else:
             raise CommandLineArgumentException(
-                f'If second argument is supplied, it must be "{dev_mode_text}"'
+                f'If first argument is supplied, it must be "{prod_mode_text}"'
             )
 
-    github_access_token = sys.argv[1]
     github = Github(github_access_token)
 
     repo_name = "zed-industries/community"
@@ -69,36 +73,81 @@ def main():
         error_message_to_erroneous_issue_data_list_map,
     )
 
-    if dev_mode:
-        print(issue_text)
-    else:
+    if prod_mode:
         top_ranking_issues_issue = repository.get_issue(number=52)
         top_ranking_issues_issue.edit(body=issue_text)
+    else:
+        print(issue_text)
 
     remaining_requests, max_requests = github.rate_limiting
     print(f"Remaining requests: {remaining_requests}")
 
 
 # TODO: Refactor this at some point
 def get_issue_maps(github, repository):
-    query_string = f"repo:{repository.full_name} is:open is:issue"
+    label_name_to_issue_list_map = get_label_name_to_issue_list_map(github, repository)
+    label_name_to_issue_data_list_map = get_label_name_to_issue_data_list_map(
+        label_name_to_issue_list_map
+    )
+
+    error_message_to_erroneous_issue_list_map = (
+        get_error_message_to_erroneous_issue_list_map(github, repository)
+    )
+    error_message_to_erroneous_issue_data_list_map = (
+        get_error_message_to_erroneous_issue_data_list_map(
+            error_message_to_erroneous_issue_list_map
+        )
+    )
+
+    # Create a new dictionary with labels ordered by the summation the of likes on the associated issues
+    label_names = list(label_name_to_issue_data_list_map.keys())
+
+    label_names.sort(
+        key=lambda label_name: sum(
+            issue_data.like_count
+            for issue_data in label_name_to_issue_data_list_map[label_name]
+        ),
+        reverse=True,
+    )
+
+    label_name_to_issue_data_list_map = {
+        label_name: label_name_to_issue_data_list_map[label_name]
+        for label_name in label_names
+    }
+
+    return (
+        label_name_to_issue_data_list_map,
+        error_message_to_erroneous_issue_data_list_map,
+    )
+
+
+def get_label_name_to_issue_list_map(github, repository):
     label_name_to_issue_list_map = defaultdict(list)
-    error_message_to_erroneous_issue_list_map = defaultdict(list)
 
-    for issue in github.search_issues(query_string):
-        labels_on_issue_set = set(label["name"] for label in issue._rawData["labels"])
-        core_labels_on_issue_set = labels_on_issue_set & CORE_LABEL_NAMES_SET
-        ignored_labels_on_issue_set = labels_on_issue_set & IGNORED_LABEL_NAMES_SET
+    for label in CORE_LABEL_NAMES_SET:
+        query_string = f'repo:{repository.full_name} is:open is:issue label:"{label}" sort:reactions-+1-desc'
 
-        if ignored_labels_on_issue_set:
-            continue
+        issue_count = 0
 
-        if len(core_labels_on_issue_set) == 0:
-            error_message_to_erroneous_issue_list_map["missing core label"].append(
-                issue
+        for issue in github.search_issues(query_string):
+            labels_on_issue_set = set(
+                label["name"] for label in issue._rawData["labels"]
             )
-        else:
-            for core_label_on_issue in core_labels_on_issue_set:
-                label_name_to_issue_list_map[core_label_on_issue].append(issue)
+            ignored_labels_on_issue_set = labels_on_issue_set & IGNORED_LABEL_NAMES_SET
+
+            if ignored_labels_on_issue_set:
+                continue
+
+            label_name_to_issue_list_map[label].append(issue)
+
+            issue_count += 1
+
+            if issue_count >= ISSUES_PER_LABEL:
+                break
+
+    return label_name_to_issue_list_map
+
+
+def get_label_name_to_issue_data_list_map(label_name_to_issue_list_map):
+    label_name_to_issue_data_list_map = {}
 
     for label_name in label_name_to_issue_list_map:
@@ -111,38 +160,38 @@ def get_issue_maps(github, repository):
             )
         )
 
-        issue_data_list = issue_data_list[0:ISSUES_PER_LABEL]
-
         if issue_data_list:
             label_name_to_issue_data_list_map[label_name] = issue_data_list
 
+    return label_name_to_issue_data_list_map
+
+
+def get_error_message_to_erroneous_issue_list_map(github, repository):
+    error_message_to_erroneous_issue_list_map = defaultdict(list)
+
+    filter_labels = CORE_LABEL_NAMES_SET.union(IGNORED_LABEL_NAMES_SET)
+    filter_labels_string = " ".join([f'-label:"{label}"' for label in filter_labels])
+    query_string = (
+        f"repo:{repository.full_name} is:open is:issue {filter_labels_string}"
+    )
+
+    for issue in github.search_issues(query_string):
+        error_message_to_erroneous_issue_list_map["missing core label"].append(issue)
+
+    return error_message_to_erroneous_issue_list_map
+
+
+def get_error_message_to_erroneous_issue_data_list_map(
+    error_message_to_erroneous_issue_list_map,
+):
     error_message_to_erroneous_issue_data_list_map = {}
 
     for label_name in error_message_to_erroneous_issue_list_map:
         issue_list = error_message_to_erroneous_issue_list_map[label_name]
         issue_data_list = [IssueData(issue) for issue in issue_list]
         error_message_to_erroneous_issue_data_list_map[label_name] = issue_data_list
 
-    # Create a new dictionary with labels ordered by the summation the of likes on the associated issues
-    label_names = list(label_name_to_issue_data_list_map.keys())
-
-    label_names.sort(
-        key=lambda label_name: sum(
-            issue_data.like_count
-            for issue_data in label_name_to_issue_data_list_map[label_name]
-        ),
-        reverse=True,
-    )
-
-    label_name_to_issue_data_list_map = {
-        label_name: label_name_to_issue_data_list_map[label_name]
-        for label_name in label_names
-    }
-
-    return (
-        label_name_to_issue_data_list_map,
-        error_message_to_erroneous_issue_data_list_map,
-    )
+    return error_message_to_erroneous_issue_data_list_map
 
 
 def get_issue_text(
@@ -169,10 +218,10 @@ def get_issue_text(
 
     if erroneous_issues_lines:
         core_label_names_string = ", ".join(
-            f'"{core_label_name}"' for core_label_name in CORE_LABEL_NAMES_LIST
+            f'"{core_label_name}"' for core_label_name in CORE_LABEL_NAMES_SET
        )
         ignored_label_names_string = ", ".join(
-            f'"{ignored_label_name}"' for ignored_label_name in IGNORED_LABEL_NAMES_LIST
+            f'"{ignored_label_name}"' for ignored_label_name in IGNORED_LABEL_NAMES_SET
        )
 
        issue_text_lines.extend(
@@ -206,7 +255,9 @@ def get_highest_ranking_issues_lines(label_name_to_issue_data_list_dictionary):
         highest_ranking_issues_lines.append(f"\n## {label}\n")
 
         for issue_data in issue_data_list:
-            markdown_bullet_point = f"{issue_data.url} ({issue_data.like_count} :thumbsup:)"
+            markdown_bullet_point = (
+                f"{issue_data.url} ({issue_data.like_count} :thumbsup:)"
+            )
             markdown_bullet_point = f"- {markdown_bullet_point}"
             highest_ranking_issues_lines.append(markdown_bullet_point)

@@ -234,6 +285,3 @@ def get_erroneous_issues_lines(error_message_to_erroneous_issue_data_list_map):
     main()
     run_duration = datetime.now() - start_time
     print(run_duration)
-
-# TODO: Progress prints
-# - "Gathering issues..."
2 changes: 0 additions & 2 deletions scripts/update_top_ranking_issues/run_dev.example

This file was deleted.
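
With run_dev.example deleted, invocation has to be read off the new main(): the token always comes from the environment, so a development run that just prints the report is presumably GITHUB_TOKEN=<token> python main.py, while GITHUB_TOKEN=<token> python main.py prod_mode is the variant that writes the report into issue #52.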