···
module.exports = async function ({ github, context, core, dry }) {
-const Bottleneck = require('bottleneck')
const path = require('node:path')
const { DefaultArtifactClient } = require('@actions/artifact')
const { readFile, writeFile } = require('node:fs/promises')
+const withRateLimit = require('./withRateLimit.js')
const artifactClient = new DefaultArtifactClient()
-// Rate-Limiting and Throttling, see for details:
-// https://github.com/octokit/octokit.js/issues/1069#throttling
-// https://docs.github.com/en/rest/using-the-rest-api/best-practices-for-using-the-rest-api
-const allLimits = new Bottleneck({
-// Avoid concurrent requests
-// Will be updated with first `updateReservoir()` call below.
-// Pause between mutative requests
-const writeLimits = new Bottleneck({ minTime: 1000 }).chain(allLimits)
-github.hook.wrap('request', async (request, options) => {
-// Requests to the /rate_limit endpoint do not count against the rate limit.
-if (options.url == '/rate_limit') return request(options)
-// Search requests are in a different resource group, which allows 30 requests / minute.
-// We do less than a handful each run, so not implementing throttling for now.
-if (options.url.startsWith('/search/')) return request(options)
-if (['POST', 'PUT', 'PATCH', 'DELETE'].includes(options.method))
-return writeLimits.schedule(request.bind(null, options))
-else return allLimits.schedule(request.bind(null, options))
-async function updateReservoir() {
-response = await github.rest.rateLimit.get()
-core.error(`Failed updating reservoir:\n${err}`)
-// Keep retrying on failed rate limit requests instead of exiting the script early.
-// Always keep 1000 spare requests for other jobs to do their regular duty.
-// They normally use below 100, so 1000 is *plenty* of room to work with.
-const reservoir = Math.max(0, response.data.resources.core.remaining - 1000)
-core.info(`Updating reservoir to: ${reservoir}`)
-allLimits.updateSettings({ reservoir })
-await updateReservoir()
-// Update remaining requests every minute to account for other jobs running in parallel.
-const reservoirUpdater = setInterval(updateReservoir, 60 * 1000)
-async function handlePullRequest(item) {
+async function handlePullRequest({ item, stats }) {
const log = (k, v) => core.info(`PR #${item.number} - ${k}: ${v}`)
const pull_number = item.number
···
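// For reference: a minimal, self-contained sketch of the throttling pattern removed above,
// assuming Octokit's request-hook API and the Bottleneck options implied by the comments.
// The wrapper name `setupThrottling`, the option values, and the try/catch shape are
// reconstructions, not the file's original code.
const Bottleneck = require('bottleneck')

async function setupThrottling({ github, core }) {
  // Serialize all requests; the reservoir is filled by updateReservoir() below.
  const allLimits = new Bottleneck({ maxConcurrent: 1, reservoir: 0 })
  // Additionally pause between mutative requests.
  const writeLimits = new Bottleneck({ minTime: 1000 }).chain(allLimits)

  github.hook.wrap('request', async (request, options) => {
    // /rate_limit and /search/* requests are exempt, as in the removed code.
    if (options.url == '/rate_limit' || options.url.startsWith('/search/'))
      return request(options)
    const limiter = ['POST', 'PUT', 'PATCH', 'DELETE'].includes(options.method)
      ? writeLimits
      : allLimits
    return limiter.schedule(request.bind(null, options))
  })

  async function updateReservoir() {
    let response
    try {
      response = await github.rest.rateLimit.get()
    } catch (err) {
      // Keep retrying on failed rate limit requests instead of exiting the script early.
      core.error(`Failed updating reservoir:\n${err}`)
      return
    }
    // Always keep 1000 spare requests for other jobs to do their regular duty.
    const reservoir = Math.max(0, response.data.resources.core.remaining - 1000)
    core.info(`Updating reservoir to: ${reservoir}`)
    allLimits.updateSettings({ reservoir })
  }

  await updateReservoir()
  // Refresh every minute to account for other jobs running in parallel;
  // callers are expected to clearInterval() the returned handle when done.
  return setInterval(updateReservoir, 60 * 1000)
}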
-async function handle(item) {
+async function handle({ item, stats }) {
const log = (k, v, skip) => {
core.info(`#${item.number} - ${k}: ${v}` + (skip ? ' (skipped)' : ''))
···
if (item.pull_request || context.payload.pull_request) {
-Object.assign(itemLabels, await handlePullRequest(item))
+Object.assign(itemLabels, await handlePullRequest({ item, stats }))
···
+await withRateLimit({ github, core }, async (stats) => {
if (context.payload.pull_request) {
-await handle(context.payload.pull_request)
+await handle({ item: context.payload.pull_request, stats })
await github.rest.actions.listWorkflowRuns({
···
arr.findIndex((firstItem) => firstItem.number == thisItem.number),
-;(await Promise.allSettled(items.map(handle)))
+;(await Promise.allSettled(items.map((item) => handle({ item, stats }))))
.filter(({ status }) => status == 'rejected')
core.setFailed(`${reason.message}\n${reason.cause.stack}`),
-`Processed ${stats.prs} PRs, ${stats.issues} Issues, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`,
-clearInterval(reservoirUpdater)
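// `./withRateLimit.js` itself is not part of this diff. Judging only from its call site
// above, it wraps the callback in the throttling that used to live inline and hands it a
// mutable `stats` object. A minimal sketch under those assumptions follows; the stats
// fields and the internals are guesses, not the helper's actual contents.
const Bottleneck = require('bottleneck')

module.exports = async function withRateLimit({ github, core }, callback) {
  // Serialize requests; a fuller version would also maintain a reservoir as sketched above.
  const limiter = new Bottleneck({ maxConcurrent: 1 })
  const stats = { requests: 0, prs: 0, issues: 0, artifacts: 0 }

  github.hook.wrap('request', async (request, options) => {
    stats.requests++
    return limiter.schedule(request.bind(null, options))
  })

  try {
    return await callback(stats)
  } finally {
    core.info(`Made ${stats.requests} API requests.`)
  }
}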