1#!/usr/bin/env python
2
3import argparse
4import requests
5import os
6import subprocess
7import json
8import re
9from bs4 import BeautifulSoup
10
# Command-line interface. The package name and attribute path can come
# either from explicit flags or from the UPDATE_NIX_PNAME /
# UPDATE_NIX_ATTR_PATH environment variables (set by the Nixpkgs update
# machinery); --pname is only mandatory when the env var is absent.
_env_pname = os.environ.get("UPDATE_NIX_PNAME")
_env_attr_path = os.environ.get("UPDATE_NIX_ATTR_PATH")

parser = argparse.ArgumentParser(
    description="Get all available versions listed for a package in a site."
)
parser.add_argument(
    "--pname",
    default=_env_pname,
    required=_env_pname is None,
    help="name of the package",
)
parser.add_argument(
    "--attr-path",
    default=_env_attr_path,
    help="attribute path of the package",
)
parser.add_argument("--url", help="url of the page that lists the package versions")
parser.add_argument("--file", help="file name for writing debugging information")
parser.add_argument("--extra-regex", help="additional regex to filter versions with")
30
31
if __name__ == "__main__":
    args = parser.parse_args()

    pname = args.pname

    # Fall back to the package name when no explicit attribute path is given.
    attr_path = args.attr_path or pname

    # If no URL was passed, derive it from the package's source in Nixpkgs:
    # the directory part of the first entry of <attr_path>.src.urls.
    url = args.url or json.loads(
        subprocess.check_output(
            [
                "nix-instantiate",
                "--json",
                "--eval",
                "-E",
                f"with import ./. {{}}; dirOf (lib.head {attr_path}.src.urls)",
            ],
            text=True,
        )
    )

    # Print a debugging message so update runs can be traced afterwards.
    if args.file:
        with open(args.file, "a") as f:
            f.write(f"# Listing versions for {pname} from {url}\n")

    page = requests.get(url)
    # Fail loudly on HTTP errors instead of scraping an error page for versions.
    page.raise_for_status()

    # Match links such as "<pname>-1.2.3.tar.gz" or "<pname>_1.2.3.zip".
    # re.escape keeps regex metacharacters in the package name (e.g. "c++")
    # from being interpreted as pattern syntax; compiled once, used per link.
    version_re = re.compile(
        rf"(.*/)?{re.escape(pname)}[-_]([\d.]+?(-[\d\w.-]+?)?)(\.tar)?(\.[^.]*)"
    )

    soup = BeautifulSoup(page.content, "html.parser")
    for link in soup.find_all("a"):
        link_url = link.get("href", None)
        if link_url is None:
            continue
        match = version_re.fullmatch(link_url)
        if match:
            version = match.group(2)
            # Optionally narrow the results with a user-supplied regex.
            if (not args.extra_regex) or re.fullmatch(args.extra_regex, version):
                print(version)