My agentic slop goes here. Not intended for anyone else!

zotero

stack/zotero-translation/.gitignore (+2)

.*.swp
_build

stack/zotero-translation/.ocamlformat (+1)

version=0.26.2

stack/zotero-translation/README.md (+7)

Partial OCaml interface to the [Zotero Translation Server](https://github.com/zotero/translation-server).

This library uses Lwt so that it stays compatible with the 5.2.0+jst compiler, which does not support effects yet.

It also vendors a chunk of B0 so that it can be built standalone.

This isn't really suitable for use by anyone yet except for Anil's personal website.
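
A minimal end-to-end sketch of how I expect this to be driven, assuming a translation server is already running locally (the upstream server listens on port 1969 by default); the DOI and slug below are placeholders:

let () =
  let zt = Zotero_translation.v "http://localhost:1969" in
  let json =
    Lwt_main.run
      (Zotero_translation.json_of_doi zt ~slug:"example-entry" "10.1000/xyz123")
  in
  print_endline (Ezjsonm.value_to_string json)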

stack/zotero-translation/bibtex.ml (+639)

(*---------------------------------------------------------------------------
   Copyright (c) 2019 University of Bern. All rights reserved.
   Distributed under the ISC license, see terms at the end of the file.
  ---------------------------------------------------------------------------*)

[@@@warning "-27-32-33-34"]

module SM = Map.Make(String)

module Err_msg = struct
  let pf = Format.fprintf
  let pp_sp = Format.pp_print_space
  let pp_nop _ () = ()
  let pp_any fmt ppf _ = pf ppf fmt

  let pp_op_enum op ?(empty = pp_nop) pp_v ppf = function
  | [] -> empty ppf ()
  | [v] -> pp_v ppf v
  | _ as vs ->
      let rec loop ppf = function
      | [v0; v1] -> pf ppf "%a@ %s@ %a" pp_v v0 op pp_v v1
      | v :: vs -> pf ppf "%a,@ " pp_v v; loop ppf vs
      | [] -> assert false
      in
      loop ppf vs

  let pp_and_enum ?empty pp_v ppf vs = pp_op_enum "and" ?empty pp_v ppf vs
  let pp_or_enum ?empty pp_v ppf vs = pp_op_enum "or" ?empty pp_v ppf vs
  let pp_did_you_mean pp_v ppf = function
  | [] -> () | vs -> pf ppf "Did@ you@ mean %a ?" (pp_or_enum pp_v) vs

  let pp_must_be pp_v ppf = function
  | [] -> () | vs -> pf ppf "Must be %a." (pp_or_enum pp_v) vs

  let pp_unknown ~kind pp_v ppf v = pf ppf "Unknown %a %a." kind () pp_v v
  let pp_unknown' ~kind pp_v ~hint ppf (v, hints) = match hints with
  | [] -> pp_unknown ~kind pp_v ppf v
  | hints -> pp_unknown ~kind pp_v ppf v; pp_sp ppf (); (hint pp_v) ppf hints

  let min_by f a b = if f a <= f b then a else b
  let max_by f a b = if f a <= f b then b else a

  let edit_distance s0 s1 =
    let minimum a b c = min a (min b c) in
    let s0 = min_by String.length s0 s1 (* row *)
    and s1 = max_by String.length s0 s1 in (* column *)
    let m = String.length s0 and n = String.length s1 in
    let rec rows row0 row i =
      if i > n then row0.(m) else begin
        row.(0) <- i;
        for j = 1 to m do
          if s0.[j - 1] = s1.[i - 1] then row.(j) <- row0.(j - 1) else
          row.(j) <- minimum (row0.(j - 1) + 1) (row0.(j) + 1) (row.(j - 1) + 1)
        done;
        rows row row0 (i + 1)
      end
    in
    rows (Array.init (m + 1) (fun x -> x)) (Array.make (m + 1) 0) 1

  let suggest ?(dist = 2) candidates s =
    let add (min, acc) name =
      let d = edit_distance s name in
      if d = min then min, (name :: acc) else
      if d < min then d, [name] else
      min, acc
    in
    let d, suggs = List.fold_left add (max_int, []) candidates in
    if d <= dist (* suggest only if not too far *) then List.rev suggs else []
end

module Tloc = struct
  type fpath = string
  let pp_path = Format.pp_print_string

  type pos = int
  type line = int
  type line_pos = line * pos
  (* For lines we keep the byte position just after the newline. Editors are
     still expecting tools to compute visual columns, which is stupid. By
     keeping these byte positions we can approximate columns by subtracting
     the line byte position from the byte location. This will only be
     correct on US-ASCII data though. Best would be to be able to give them
     [sbyte] and [ebyte]. *)

  let l v = v
  type t =
    { file : fpath;
      sbyte : pos; ebyte : pos;
      sline : pos * line; eline : pos * line }

  let no_file = "-"
  let v ~file ~sbyte ~ebyte ~sline ~eline = { file; sbyte; ebyte; sline; eline }
  let file l = l.file
  let sbyte l = l.sbyte
  let ebyte l = l.ebyte
  let sline l = l.sline
  let eline l = l.eline
  let nil =
    let pnil = -1 in
    let lnil = (-1, pnil) in
    v ~file:no_file ~sbyte:pnil ~ebyte:pnil ~sline:lnil ~eline:lnil

  let merge l0 l1 =
    let sbyte, sline =
      if l0.sbyte < l1.sbyte then l0.sbyte, l0.sline else l1.sbyte, l1.sline
    in
    let ebyte, eline =
      if l0.ebyte < l1.ebyte then l1.ebyte, l1.eline else l0.ebyte, l0.eline
    in
    v ~file:l0.file ~sbyte ~ebyte ~sline ~eline

  let to_start l =
    v ~file:l.file ~sbyte:l.sbyte ~ebyte:l.sbyte ~sline:l.sline ~eline:l.sline

  let to_end l =
    v ~file:l.file ~sbyte:l.ebyte ~ebyte:l.ebyte ~sline:l.eline ~eline:l.eline

  let restart ~at:s e =
    v ~file:e.file ~sbyte:s.sbyte ~ebyte:e.ebyte ~sline:s.sline ~eline:e.eline

  let pf = Format.fprintf
  let pp_ocaml ppf l = match l.ebyte < 0 with
  | true -> pf ppf "File \"%a\", line n/a, characters n/a" pp_path l.file
  | false ->
      let pp_lines ppf l = match fst l.sline = fst l.eline with
      | true -> pf ppf "line %d" (fst l.sline)
      | false -> pf ppf "lines %d-%d" (fst l.sline) (fst l.eline)
      in
      (* "characters" represent positions (insertion points) not columns *)
      let pos_s = l.sbyte - snd l.sline in
      let pos_e = l.ebyte - snd l.eline + 1 in
      pf ppf "File \"%a\", %a, characters %d-%d"
        pp_path l.file pp_lines l pos_s pos_e

  let pp_gnu ppf l = match l.ebyte < 0 with
  | true -> pf ppf "%a:" pp_path l.file
  | false ->
      let pp_lines ppf l =
        let col_s = l.sbyte - snd l.sline + 1 in
        let col_e = l.ebyte - snd l.eline + 1 in
        match fst l.sline = fst l.eline with
        | true -> pf ppf "%d.%d-%d" (fst l.sline) col_s col_e
        | false ->
            pf ppf "%d.%d-%d.%d" (fst l.sline) col_s (fst l.eline) col_e
      in
      pf ppf "%a:%a" pp_path l.file pp_lines l

  let pp_dump ppf l =
    pf ppf "[bytes %d;%d][lines %d;%d][lbytes %d;%d]"
      l.sbyte l.ebyte (fst l.sline) (fst l.eline) (snd l.sline) (snd l.eline)

  let pp = pp_gnu
end

module Utf_8 = struct
  type case =
  | L1 | L2 | L3_E0 | L3_E1_EC_or_EE_EF | L3_ED | L4_F0 | L4_F1_F3 | L4_F4 | E

  let case =
(*
    (* See https://tools.ietf.org/html/rfc3629#section-4 *)
    Printf.printf "[|";
    for i = 0 to 255 do
      if i mod 16 = 0 then Printf.printf "\n";
      if 0x00 <= i && i <= 0x7F then Printf.printf "L1; " else
      if 0xC2 <= i && i <= 0xDF then Printf.printf "L2; " else
      if 0xE0 = i then Printf.printf "L3_E0; " else
      if 0xE1 <= i && i <= 0xEC || 0xEE <= i && i <= 0xEF
      then Printf.printf "L3_E1_EC_or_EE_EF; " else
      if 0xED = i then Printf.printf "L3_ED;" else
      if 0xF0 = i then Printf.printf "L4_F0; " else
      if 0xF1 <= i && i <= 0xF3 then Printf.printf "L4_F1_F3; " else
      if 0xF4 = i then Printf.printf "L4_F4; " else
      Printf.printf "E; "
    done;
    Printf.printf "\n|]"
*)
    [|
      L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1;
      L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1;
      L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1;
      L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1;
      L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1;
      L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1;
      L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1;
      L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1; L1;
      E; E; E; E; E; E; E; E; E; E; E; E; E; E; E; E;
      E; E; E; E; E; E; E; E; E; E; E; E; E; E; E; E;
      E; E; E; E; E; E; E; E; E; E; E; E; E; E; E; E;
      E; E; E; E; E; E; E; E; E; E; E; E; E; E; E; E;
      E; E; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2;
      L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2; L2;
      L3_E0; L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF;
      L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF;
      L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF;
      L3_E1_EC_or_EE_EF; L3_ED; L3_E1_EC_or_EE_EF; L3_E1_EC_or_EE_EF;
      L4_F0; L4_F1_F3; L4_F1_F3; L4_F1_F3; L4_F4; E; E; E; E; E; E; E; E; E; E; E;
    |]
end

module Tdec = struct
  type 'a fmt = Format.formatter -> 'a -> unit
  let pp_did_you_mean = Err_msg.pp_did_you_mean
  let pp_and_enum = Err_msg.pp_and_enum
  let pp_or_enum = Err_msg.pp_or_enum
  let pp_did_you_mean = Err_msg.pp_did_you_mean
  let pp_must_be = Err_msg.pp_must_be
  let pp_unknown = Err_msg.pp_unknown
  let pp_unknown' = Err_msg.pp_unknown'

  (* Decoders *)

  type t =
    { file : Tloc.fpath; i : string; tok : Buffer.t;
      mutable pos : int; mutable line : int; mutable line_pos : int; }

  let create ?(file = Tloc.no_file) i =
    { file; i; tok = Buffer.create 255; pos = 0; line = 1; line_pos = 0 }

  (* Location *)

  let file d = d.file
  let pos d = d.pos
  let line d = d.line, d.line_pos

  let loc d ~sbyte ~ebyte ~sline ~eline =
    Tloc.v ~file:d.file ~sbyte ~ebyte ~sline ~eline

  let loc_to_here d ~sbyte ~sline =
    loc d ~sbyte ~ebyte:d.pos ~sline ~eline:(d.line, d.line_pos)

  let loc_here d = loc_to_here d ~sbyte:d.pos ~sline:(d.line, d.line_pos)

  (* Errors *)

  exception Err of Tloc.t * string

  let err loc msg = raise_notrace (Err (loc, msg))
  let err_to_here d ~sbyte ~sline fmt =
    Format.kasprintf (err (loc_to_here d ~sbyte ~sline)) fmt

  let err_here d fmt = Format.kasprintf (err (loc_here d)) fmt
  let err_suggest = Err_msg.suggest

  (* Lexing *)

  let incr_line d = match d.i.[d.pos] with (* assert (not (eoi d)) *)
  | '\r' -> d.line <- d.line + 1; d.line_pos <- d.pos + 1
  | '\n' ->
      (if d.pos = 0 || d.i.[d.pos - 1] <> '\r' then d.line <- d.line + 1);
      d.line_pos <- d.pos + 1
  | _ -> ()
  [@@ ocaml.inline]

  let eoi d = d.pos >= String.length d.i [@@ ocaml.inline]
  let byte d = if eoi d then 0xFFFF else Char.code d.i.[d.pos] [@@ ocaml.inline]
  let accept_byte d = incr_line d; d.pos <- d.pos + 1
  [@@ ocaml.inline]

  let accept_utf_8 accept d =
    let err d = match byte d with
    | 0xFFFF -> err_here d "UTF-8 decoding error: unexpected end of input"
    | b -> err_here d "UTF-8 decoding error: byte %02x illegal here" b
    in
    let accept_tail d = if (byte d lsr 6 = 0b10) then accept d else err d in
    match byte d with
    | 0xFFFF -> err d
    | b ->
        (* If a subsequent [byte d] invocation is 0xFFFF we get to [err]. *)
        match Utf_8.case.(b) with
        | L1 -> accept d
        | L2 -> accept d; accept_tail d
        | L3_E0 ->
            accept d;
            if (byte d - 0xA0 < 0xBF - 0xA0) then accept d else err d;
            accept_tail d
        | L3_E1_EC_or_EE_EF -> accept d; accept_tail d; accept_tail d
        | L3_ED ->
            accept d;
            if (byte d - 0x80 < 0x9F - 0x80) then accept d else err d;
            accept_tail d
        | L4_F0 ->
            accept d;
            if (byte d - 0x90 < 0xBF - 0x90) then accept d else err d;
            accept_tail d; accept_tail d
        | L4_F1_F3 ->
            accept d;
            accept_tail d; accept_tail d; accept_tail d
        | L4_F4 ->
            accept d;
            if (byte d - 0x80 < 0x8F - 0x80) then accept d else err d;
            (* the two remaining continuation bytes of the 4-byte sequence *)
            accept_tail d; accept_tail d
        | E -> err d

  let accept_uchar d = accept_utf_8 accept_byte d

  (* Tokenizer *)

  let tok_reset d = Buffer.reset d.tok [@@ ocaml.inline]
  let tok_pop d = let t = Buffer.contents d.tok in tok_reset d; t
  [@@ ocaml.inline]

  let tok_accept_byte d =
    Buffer.add_char d.tok d.i.[d.pos]; accept_byte d [@@ ocaml.inline]

  let tok_accept_uchar d = accept_utf_8 tok_accept_byte d [@@ ocaml.inline]
  let tok_add_byte d b = Buffer.add_char d.tok (Char.chr b) [@@ ocaml.inline]
  let tok_add_bytes d s = Buffer.add_string d.tok s [@@ ocaml.inline]
  let tok_add_char d c = Buffer.add_char d.tok c [@@ ocaml.inline]

  let buffer_add_uchar b u = match Uchar.to_int u with
  (* XXX From 4.06 use Buffer.add_utf_8_uchar *)
  | u when u < 0 -> assert false
  | u when u <= 0x007F ->
      Buffer.add_char b (Char.unsafe_chr u)
  | u when u <= 0x07FF ->
      Buffer.add_char b (Char.unsafe_chr (0xC0 lor (u lsr 6)));
      Buffer.add_char b (Char.unsafe_chr (0x80 lor (u land 0x3F)))
  | u when u <= 0xFFFF ->
      Buffer.add_char b (Char.unsafe_chr (0xE0 lor (u lsr 12)));
      Buffer.add_char b (Char.unsafe_chr (0x80 lor ((u lsr 6) land 0x3F)));
      Buffer.add_char b (Char.unsafe_chr (0x80 lor (u land 0x3F)))
  | u when u <= 0x10FFFF ->
      Buffer.add_char b (Char.unsafe_chr (0xF0 lor (u lsr 18)));
      Buffer.add_char b (Char.unsafe_chr (0x80 lor ((u lsr 12) land 0x3F)));
      Buffer.add_char b (Char.unsafe_chr (0x80 lor ((u lsr 6) land 0x3F)));
      Buffer.add_char b (Char.unsafe_chr (0x80 lor (u land 0x3F)))
  | _ -> assert false

  let tok_add_uchar d u = buffer_add_uchar d.tok u
end

module Url = struct
  type scheme = string
  type authority = string
  type path = string
  type query = string
  type fragment = string
  type t = string

  let string_subrange ?(first = 0) ?last s =
    let max = String.length s - 1 in
    let last = match last with
    | None -> max
    | Some l when l > max -> max
    | Some l -> l
    in
    let first = if first < 0 then 0 else first in
    if first > last then "" else String.sub s first (last - first + 1)

  let white = function ' ' | '\t' .. '\r' -> true | _ -> false
  let alpha = function 'A' .. 'Z' | 'a' .. 'z' -> true | _ -> false
  let digit = function '0' .. '9' -> true | _ -> false

  let scheme_char c =
    alpha c || digit c || Char.equal c '+' || Char.equal c '-' ||
    Char.equal '.' c

  let find_scheme_colon u =
    if u = "" || not (alpha u.[0]) then None else
    let max = String.length u - 1 in
    let i = ref 1 in
    while !i <= max && scheme_char u.[!i] do incr i done;
    if !i > max || u.[!i] <> ':' then None else Some !i

  let find_authority_last ~start u =
    let max = String.length u - 1 in
    if start > max then None else
    if start + 1 > max then Some (start - 1) else
    if not (u.[start] = '/' && u.[start + 1] = '/') then Some (start - 1) else
    let i = ref (start + 2) in
    while (!i <= max && u.[!i] <> '/' && u.[!i] <> '?' && u.[!i] <> '#')
    do incr i done;
    Some (!i - 1)

  let scheme u = match find_scheme_colon u with
  | None -> None | Some i -> Some (String.sub u 0 i)

  let path_first u =
    let start = match find_scheme_colon u with
    | None -> 0 | Some i -> i + 1
    in
    let first = match find_authority_last ~start u with
    | None -> start | Some last -> last + 1
    in
    let max = String.length u - 1 in
    if first > max || u.[first] = '#' || u.[first] = '?' then None else Some first

  let path_last u ~first =
    let max = String.length u - 1 in
    let i = ref (first + 1) in
    while (!i <= max && u.[!i] <> '?' && u.[!i] <> '#') do incr i done;
    !i - 1

  let path u = match path_first u with
  | None -> None
  | Some first -> Some (string_subrange ~first ~last:(path_last u ~first) u)
end

let escape = (* The escape rules are a bit unclear. These are those of LaTeX *)
  let byte_replaced_length char_len s =
    let rec loop s max i l = match i > max with
    | true -> l
    | false -> loop s max (i + 1) (l + char_len s.[i])
    in
    loop s (String.length s - 1) 0 0
  in
  let byte_replace set_char s ~len ~replaced_len =
    let b = Bytes.create replaced_len in
    let rec loop s max i k = match i > max with
    | true -> Bytes.unsafe_to_string b
    | false -> loop s max (i + 1) (set_char b k s.[i])
    in
    loop s (len - 1) 0 0
  in
  let byte_escaper char_len set_char s =
    let len = String.length s in
    let replaced_len = byte_replaced_length char_len s in
    match replaced_len = len with
    | true -> s
    | false -> byte_replace set_char s ~len ~replaced_len
  in
  let tilde_esc = "\\textasciitilde" in
  let tilde_len = String.length tilde_esc in
  let circ_esc = "\\textasciicircum" in
  let circ_len = String.length circ_esc in
  let bslash_esc = "\\textbackslash" in
  let bslash_len = String.length bslash_esc in
  let char_len = function
  | '&' | '%' | '$' | '#' | '_' | '{' | '}' -> 2
  | '~' -> tilde_len
  | '^' -> circ_len
  | '\\' -> bslash_len
  | _ -> 1
  in
  let set_char b i = function
  | '&' | '%' | '$' | '#' | '_' | '{' | '}' as c ->
      Bytes.set b i '\\'; Bytes.set b (i + 1) c; i + 2
  | '~' -> Bytes.blit_string tilde_esc 0 b i tilde_len; i + tilde_len
  | '^' -> Bytes.blit_string circ_esc 0 b i circ_len; i + circ_len
  | '\\' -> Bytes.blit_string bslash_esc 0 b i bslash_len; i + bslash_len
  | c -> Bytes.set b i c; i + 1
  in
  byte_escaper char_len set_char

(* TODO unescape on decode. *)

type t =
  { type' : string;
    cite_key : string;
    fields : string SM.t;
    loc : Tloc.t; }

let v ~type' ~cite_key ~fields () = { type'; cite_key; fields; loc = Tloc.nil }

let type' e = e.type'
let cite_key e = e.cite_key
let fields e = e.fields
let loc e = e.loc
let pp ppf e =
  let pp_field ppf (k, v) = Fmt.pf ppf "@[<h>%s = {%s}@]" k (escape v) in
  Fmt.pf ppf "@[<v2>@%s{%s,@,%a}@]" e.type' e.cite_key
    (Fmt.iter_bindings ~sep:Fmt.comma SM.iter pp_field) e.fields

(* Field values *)

let list_value s =
  List.filter (fun s -> s <> "") @@
  List.map String.trim (String.split_on_char ',' s)

let doi e = match SM.find_opt "doi" e.fields with
| None -> None
| Some doi ->
    let ret doi = match String.trim doi with
    | "" -> None
    | doi -> Some doi
    in
    (* chop scheme and authority in case there is one *)
    match Url.scheme doi with
    | None -> ret doi
    | Some _ ->
        match Url.path doi with
        | None -> ret doi
        | Some p -> ret p

let keywords e = Option.map list_value (SM.find_opt "keywords" e.fields)
let annote e = SM.find_opt "annote" e.fields

(* Codec *)

type error_kind = string
type error = error_kind * Tloc.t

let pp_error ppf (err, l) =
  Fmt.pf ppf "@[<v>%a:@,%a: %s@]"
    Tloc.pp l Fmt.string "Error" err

let curr_char d = (* TODO better escaping (this is for error reports) *)
  Tdec.tok_reset d; Tdec.tok_accept_uchar d; Tdec.tok_pop d

let err_illegal_uchar d = Tdec.err_here d "illegal character: %s" (curr_char d)
let err_illegal_byte d b = Tdec.err_here d "illegal character U+%04X" b
let err_expected d exp = Tdec.err_here d "expected %s" exp
let err_eoi msg d ~sbyte ~sline =
  Tdec.err_to_here d ~sbyte ~sline "end of input: %s" msg

let err_eoi_entry = err_eoi "unclosed BibTeX entry"
let err_eoi_field = err_eoi "unfinished BibTeX entry field"
let err_eoi_value = err_eoi "unfinished BibTeX field value"
let err_brace d ~sbyte ~sline =
  Tdec.err_to_here d ~sbyte ~sline "incorrect brace {} nesting"

let dec_byte d = match Tdec.byte d with
| c when 0x00 <= c && c <= 0x08 || 0x0E <= c && c <= 0x1F || c = 0x7F ->
    err_illegal_byte d c
| c -> c
[@@ ocaml.inline]

let rec skip_white d = match dec_byte d with
| 0x20 | 0x09 | 0x0A | 0x0B | 0x0C | 0x0D -> Tdec.accept_byte d; skip_white d
| _ -> ()

let dec_token ~stop d =
  let rec loop d = match dec_byte d with
  | 0x28 | 0x29 | 0x3B | 0x22
  | 0x20 | 0x09 | 0x0A | 0x0B | 0x0C | 0x0D
  | 0xFFFF -> Tdec.tok_pop d
  | c when c = stop -> Tdec.tok_pop d
  | _ -> Tdec.tok_accept_uchar d; loop d
  in
  loop d

let rec dec_string ~sbyte ~sline ~stop d = match dec_byte d with
| 0xFFFF -> err_eoi_value ~sbyte ~sline d
| c when c = stop -> Tdec.accept_byte d; Tdec.tok_pop d
| _ -> Tdec.tok_accept_uchar d; dec_string ~sbyte ~sline ~stop d

let rec dec_tex i ~sbyte ~sline d = match dec_byte d with
| 0xFFFF -> err_eoi_value ~sbyte ~sline d
| 0x007D ->
    if i = 0 then (Tdec.accept_byte d; Tdec.tok_pop d) else
    (Tdec.tok_accept_uchar d; dec_tex (i - 1) ~sbyte ~sline d)
| c ->
    let i = if c = 0x007B then i + 1 else i in
    Tdec.tok_accept_uchar d; dec_tex i ~sbyte ~sline d

let dec_value d =
  let sbyte = Tdec.pos d and sline = Tdec.line d in
  match dec_byte d with
  | 0x007B (* { *) -> Tdec.accept_byte d; dec_tex 0 ~sbyte ~sline d
  | 0x0022 -> Tdec.accept_byte d; dec_string ~sbyte ~sline ~stop:0x0022 d
  | _ -> dec_token ~stop:0x002C d

let dec_field d acc =
  let sbyte = Tdec.pos d and sline = Tdec.line d in
  let id = dec_token ~stop:0x003D (* = *) d in
  skip_white d;
  match dec_byte d with
  | 0xFFFF -> err_eoi_field ~sbyte ~sline d
  | 0x003D (* = *) ->
      Tdec.accept_byte d;
      skip_white d;
      begin match dec_byte d with
      | 0xFFFF -> err_eoi_field ~sbyte ~sline d
      | _ ->
          SM.add (String.lowercase_ascii id) (dec_value d) acc
      end
  | _ -> err_expected d "'='"

let rec dec_fields ~sbyte ~sline d acc =
  skip_white d;
  match dec_byte d with
  | 0xFFFF -> err_eoi_entry ~sbyte ~sline d
  | 0x007D (* } *) -> acc
  | _ ->
      let acc = dec_field d acc in
      skip_white d;
      match dec_byte d with
      | 0x002C (* , *) -> Tdec.accept_byte d; dec_fields ~sbyte ~sline d acc
      | 0x007D (* } *) -> acc
      | 0xFFFF -> err_eoi_entry ~sbyte ~sline d
      | b -> err_expected d "',' or '}'"

let dec_entry d =
  let sbyte = Tdec.pos d and sline = Tdec.line d in
  Tdec.accept_byte d (* @ *);
  let type' = dec_token ~stop:0x007B d (* { *) in
  match dec_byte d with
  | 0x007B ->
      Tdec.accept_byte d;
      let cite_key = dec_token ~stop:0x002C d (* , *) in
      skip_white d;
      begin match dec_byte d with
      | 0x002C (* , *) ->
          Tdec.accept_byte d;
          let fields = dec_fields ~sbyte ~sline d SM.empty in
          let loc = Tdec.loc_to_here d ~sbyte ~sline in
          Tdec.accept_byte d;
          { type'; cite_key; fields; loc }
      | _ -> err_expected d "','"
      end
  | _ -> err_expected d "'{'"

let dec_entries d =
  let rec loop d acc =
    skip_white d;
    match dec_byte d with
    | 0x0040 (* @ *) -> loop d (dec_entry d :: acc)
    | 0xFFFF -> List.rev acc
    | b -> err_illegal_uchar d
  in
  loop d []

let of_string ?(file = Fpath.v "-") s =
  try
    let file = Fpath.to_string file in
    let d = Tdec.create ~file s in
    Ok (dec_entries d)
  with Tdec.Err (loc, msg) -> Error (msg, loc)

let of_string' ?file s =
  Result.map_error (fun e -> Fmt.str "%a" pp_error e) @@
  (of_string ?file s)

let to_string es = Fmt.str "@[<v>%a@]" (Fmt.list pp) es

(*---------------------------------------------------------------------------
   Copyright (c) 2019 University of Bern

   Permission to use, copy, modify, and/or distribute this software for any
   purpose with or without fee is hereby granted, provided that the above
   copyright notice and this permission notice appear in all copies.

   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  ---------------------------------------------------------------------------*)
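
Quick sanity check of what the LaTeX-style escaping above produces, as I read the table (traced by hand, not run):

let () =
  assert (Bibtex.escape "50% of {all} #tags & more_stuff"
          = "50\\% of \\{all\\} \\#tags \\& more\\_stuff")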

stack/zotero-translation/bibtex.mli (+92)

(*---------------------------------------------------------------------------
   Copyright (c) 2019 University of Bern. All rights reserved.
   Distributed under the ISC license, see terms at the end of the file.
  ---------------------------------------------------------------------------*)

(** {{:https://www.ctan.org/pkg/bibtex}BibT{_E}X} codec.

    {b Limitations.} At the moment [@string], [@preamble]
    and [@comment] are not supported. For values we assume UTF-8 without
    escape sequences. Nested braces are handled though. *)

val escape : string -> string
(** [escape s] escapes [s] for BibT{_E}X. *)

type t
(** The type for BibT{_E}X entries. *)

module Tloc : sig
  type t
end

module SM : Map.S with type key := string

val v :
  type':string -> cite_key:string -> fields:string SM.t -> unit -> t
(** [v ~type' ~cite_key ~fields ()] is an entry of type [type'], citation
    key [cite_key] and fields [fields]. *)

val type' : t -> string
(** [type' e] is the type of the entry. *)

val cite_key : t -> string
(** [cite_key e] is the citation key of the entry. *)

val fields : t -> string SM.t
(** [fields e] are the BibTeX fields. Field names are lowercased with
    [String.lowercase_ascii]. *)

val pp : t Fmt.t
(** [pp] formats an entry using BibT{_E}X syntax. *)

(** {1:fields Field queries} *)

val list_value : string -> string list
(** [list_value s] splits [s] on commas and trims the results. *)

val doi : t -> string option
(** [doi e] is the [doi] field of [e]. Note that if the field happens to
    hold a URI, the scheme and authority are stripped. *)

val keywords : t -> string list option
(** [keywords e] is the comma-separated [keywords] field. *)

val annote : t -> string option
(** [annote e] is the [annote] field. *)

(** {1:codec Codec} *)

type error_kind
(** The type for kinds of decoding errors. *)

type error = error_kind * Tloc.t
(** The type for decoding errors: the error kind and its location. *)

val pp_error : error Fmt.t

val of_string : ?file:Fpath.t -> string -> (t list, error) result
(** [of_string ~file s] parses entries from [s] assuming it
    was read from [file] (defaults to [Fpath.v "-"]). *)

val of_string' : ?file:Fpath.t -> string -> (t list, string) result
(** [of_string'] is like {!of_string} but converts the error to an
    error message. *)

val to_string : t list -> string
(** [to_string es] formats the list of entries using BibT{_E}X syntax. *)

(*---------------------------------------------------------------------------
   Copyright (c) 2019 University of Bern

   Permission to use, copy, modify, and/or distribute this software for any
   purpose with or without fee is hereby granted, provided that the above
   copyright notice and this permission notice appear in all copies.

   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  ---------------------------------------------------------------------------*)
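
A quick sketch of the codec from the outside (the entry text here is made up and the DOI is a placeholder):

let () =
  let src =
    "@article{doe_2024, author = {Doe, Jane}, year = {2024}, \
     doi = {10.1000/xyz123}}"
  in
  match Bibtex.of_string' src with
  | Error e -> prerr_endline e
  | Ok entries ->
      List.iter (fun e ->
          Printf.printf "%s -> doi: %s\n" (Bibtex.cite_key e)
            (Option.value ~default:"n/a" (Bibtex.doi e)))
        entries;
      print_string (Bibtex.to_string entries)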

stack/zotero-translation/dune (+4)

(library
 (name zotero_translation)
 (public_name zotero-translation)
 (libraries astring cohttp-lwt-unix ezjsonm fmt fpath http lwt uri))

stack/zotero-translation/dune-project (+21)

(lang dune 3.17)
(name zotero-translation)

(source (github avsm/zotero-translation))
(license ISC)
(authors "Anil Madhavapeddy")
(maintainers "anil@recoil.org")

(generate_opam_files true)

(package
 (name zotero-translation)
 (synopsis "API client to the Zotero translation server")
 (description "This is all still a work in progress")
 (depends
  (ocaml (>= "5.1.0"))
  astring
  cohttp-lwt-unix
  ezjsonm
  fmt
  fpath
  http
  lwt
  uri
  yaml))

stack/zotero-translation/zotero-translation.opam (+34)

# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
synopsis: "API client to the Zotero translation server"
description: "This is all still a work in progress"
maintainer: ["anil@recoil.org"]
authors: ["Anil Madhavapeddy"]
license: "ISC"
homepage: "https://github.com/avsm/zotero-translation"
bug-reports: "https://github.com/avsm/zotero-translation/issues"
depends: [
  "dune" {>= "3.17"}
  "ocaml" {>= "5.1.0"}
  "astring"
  "cohttp-lwt-unix"
  "ezjsonm"
  "fmt"
  "fpath"
  "http"
  "lwt"
  "uri"
  "yaml"
  "odoc" {with-doc}
]
build: [
  ["dune" "subst"] {dev}
  [
    "dune"
    "build"
    "-p"
    name
    "-j"
    jobs
    "@install"
    "@runtest" {with-test}
    "@doc" {with-doc}
  ]
]
dev-repo: "git+https://github.com/avsm/zotero-translation.git"

stack/zotero-translation/zotero_translation.ml (+275)

(** Resolve a DOI from a Zotero translation server *)

module C = Cohttp
module CL = Cohttp_lwt
module CLU = Cohttp_lwt_unix.Client
module J = Ezjsonm

(* From the ZTS source code: https://github.com/zotero/translation-server/blob/master/src/formats.js
   bibtex: "9cb70025-a888-4a29-a210-93ec52da40d4",
   biblatex: "b6e39b57-8942-4d11-8259-342c46ce395f",
   bookmarks: "4e7119e0-02be-4848-86ef-79a64185aad8",
   coins: "05d07af9-105a-4572-99f6-a8e231c0daef",
   csljson: "bc03b4fe-436d-4a1f-ba59-de4d2d7a63f7",
   csv: "25f4c5e2-d790-4daa-a667-797619c7e2f2",
   endnote_xml: "eb7059a4-35ec-4961-a915-3cf58eb9784b",
   evernote: "18dd188a-9afc-4cd6-8775-1980c3ce0fbf",
   mods: "0e2235e7-babf-413c-9acf-f27cce5f059c",
   rdf_bibliontology: "14763d25-8ba0-45df-8f52-b8d1108e7ac9",
   rdf_dc: "6e372642-ed9d-4934-b5d1-c11ac758ebb7",
   rdf_zotero: "14763d24-8ba0-45df-8f52-b8d1108e7ac9",
   refer: "881f60f2-0802-411a-9228-ce5f47b64c7d",
   refworks_tagged: "1a3506da-a303-4b0a-a1cd-f216e6138d86",
   ris: "32d59d2d-b65a-4da4-b0a3-bdd3cfb979e7",
   tei: "032ae9b7-ab90-9205-a479-baf81f49184a",
   wikipedia: "3f50aaac-7acc-4350-acd0-59cb77faf620"
*)
type format =
  | Bibtex
  | Biblatex
  | Bookmarks
  | Coins
  | Csljson
  | Csv
  | Endnote_xml
  | Evernote
  | Mods
  | Rdf_bibliontology
  | Rdf_dc
  | Rdf_zotero
  | Refer
  | Refworks_tagged
  | Ris
  | Tei
  | Wikipedia

let format_to_string = function
  | Bibtex -> "bibtex"
  | Biblatex -> "biblatex"
  | Bookmarks -> "bookmarks"
  | Coins -> "coins"
  | Csljson -> "csljson"
  | Csv -> "csv"
  | Endnote_xml -> "endnote_xml"
  | Evernote -> "evernote"
  | Mods -> "mods"
  | Rdf_bibliontology -> "rdf_bibliontology"
  | Rdf_dc -> "rdf_dc"
  | Rdf_zotero -> "rdf_zotero"
  | Refer -> "refer"
  | Refworks_tagged -> "refworks_tagged"
  | Ris -> "ris"
  | Tei -> "tei"
  | Wikipedia -> "wikipedia"

let format_of_string = function
  | "bibtex" -> Some Bibtex
  | "biblatex" -> Some Biblatex
  | "bookmarks" -> Some Bookmarks
  | "coins" -> Some Coins
  | "csljson" -> Some Csljson
  | "csv" -> Some Csv
  | "endnote_xml" -> Some Endnote_xml
  | "evernote" -> Some Evernote
  | "mods" -> Some Mods
  | "rdf_bibliontology" -> Some Rdf_bibliontology
  | "rdf_dc" -> Some Rdf_dc
  | "rdf_zotero" -> Some Rdf_zotero
  | "refer" -> Some Refer
  | "refworks_tagged" -> Some Refworks_tagged
  | "ris" -> Some Ris
  | "tei" -> Some Tei
  | "wikipedia" -> Some Wikipedia
  | _ -> None

let web_endp base_uri =
  match String.ends_with ~suffix:"/" base_uri with
  | true -> Uri.of_string (base_uri ^ "web")
  | false -> Uri.of_string (base_uri ^ "/web")

let export_endp base_uri =
  match String.ends_with ~suffix:"/" base_uri with
  | true -> Uri.of_string (base_uri ^ "export")
  | false -> Uri.of_string (base_uri ^ "/export")

let search_endp base_uri =
  match String.ends_with ~suffix:"/" base_uri with
  | true -> Uri.of_string (base_uri ^ "search")
  | false -> Uri.of_string (base_uri ^ "/search")

let _import_endp base_uri =
  match String.ends_with ~suffix:"/" base_uri with
  | true -> Uri.of_string (base_uri ^ "import")
  | false -> Uri.of_string (base_uri ^ "/import")

open Lwt.Infix

(* The Eio version has more in here, hence I'm just keeping this around. *)
type t = {
  base_uri: string;
}

let v base_uri = { base_uri }

let resolve_doi { base_uri } doi =
  let body = "https://doi.org/" ^ doi in
  let doi_body = CL.Body.of_string body in
  let headers = C.Header.init_with "content-type" "text/plain" in
  let uri = web_endp base_uri in
  CLU.call ~headers ~body:doi_body `POST uri >>= fun (resp, body) ->
  let status = C.Response.status resp in
  body |> Cohttp_lwt.Body.to_string >>= fun body ->
  if status = `OK then begin
    try
      let doi_json = J.from_string body in
      Lwt.return_ok doi_json
    with exn -> Lwt.return_error (`Msg (Printexc.to_string exn))
  end else
    Lwt.return_error
      (`Msg (Format.asprintf "Unexpected HTTP status: %a for %s" Http.Status.pp status body))

let resolve_url { base_uri } url =
  let url_body = CL.Body.of_string url in
  let headers = C.Header.init_with "content-type" "text/plain" in
  let uri = web_endp base_uri in
  CLU.call ~headers ~body:url_body `POST uri >>= fun (resp, body) ->
  let status = C.Response.status resp in
  body |> Cohttp_lwt.Body.to_string >>= fun body ->
  if status = `OK then begin
    try
      let url_json = J.from_string body in
      Lwt.return_ok url_json
    with exn -> Lwt.return_error (`Msg (Printexc.to_string exn))
  end else
    Lwt.return_error
      (`Msg (Format.asprintf "Unexpected HTTP status: %a for %s" Http.Status.pp status body))

let search_id { base_uri } doi =
  let body = "https://doi.org/" ^ doi in
  let doi_body = CL.Body.of_string body in
  let headers = C.Header.init_with "content-type" "text/plain" in
  let uri = search_endp base_uri in
  CLU.call ~headers ~body:doi_body `POST uri >>= fun (resp, body) ->
  let status = C.Response.status resp in
  body |> Cohttp_lwt.Body.to_string >>= fun body ->
  if status = `OK then begin
    try
      let doi_json = J.from_string body in
      Lwt.return_ok doi_json
    with exn -> Lwt.return_error (`Msg (Printexc.to_string exn))
  end else
    Lwt.return_error
      (`Msg (Format.asprintf "Unexpected HTTP status: %a for %s" Http.Status.pp status body))

let export { base_uri } format api =
  let body = CL.Body.of_string (J.to_string api) in
  let headers = C.Header.init_with "content-type" "application/json" in
  let uri = Uri.with_query' (export_endp base_uri) ["format", format_to_string format] in
  CLU.call ~headers ~body `POST uri >>= fun (resp, body) ->
  let status = C.Response.status resp in
  body |> Cohttp_lwt.Body.to_string >>= fun body ->
  if status = `OK then begin
    try
      match format with
      | Bibtex -> Lwt.return_ok (Astring.String.trim body)
      | _ -> Lwt.return_ok body
    with exn -> Lwt.return_error (`Msg (Printexc.to_string exn))
  end else
    Lwt.return_error
      (`Msg (Format.asprintf "Unexpected HTTP status: %a for %s" Http.Status.pp status body))

let unescape_hex s =
  let buf = Buffer.create (String.length s) in
  let rec aux i =
    if i >= String.length s then
      Buffer.contents buf
    else if s.[i] = '\\' && i+3 < String.length s && s.[i+1] = 'x' then begin
      let hex = String.sub s (i+2) 2 in
      let char_code = int_of_string ("0x" ^ hex) in
      Buffer.add_char buf (char_of_int char_code);
      aux (i+4)
    end
    else begin
      Buffer.add_char buf s.[i];
      aux (i+1)
    end
  in aux 0

let unescape_bibtex s =
  unescape_hex s |>
  String.split_on_char '{' |> String.concat "" |>
  String.split_on_char '}' |> String.concat ""

let fields_of_bib bib =
  match Bibtex.of_string bib with
  | Error e ->
      prerr_endline bib;
      Fmt.epr "%a\n%!" Bibtex.pp_error e;
      Lwt.fail_with "bib parse err TODO"
  | Ok [bib] ->
      let f =
        Bibtex.fields bib |> Bibtex.SM.bindings
        |> List.map (fun (k, v) -> k, unescape_bibtex v)
      in
      let ty = match Bibtex.type' bib with "inbook" -> "book" | x -> x in
      let v = List.fold_left (fun acc (k, v) -> (k, `String v) :: acc) ["bibtype", `String ty] f in
      Lwt.return v
  | Ok _ -> Lwt.fail_with "one bib at a time plz"

let bib_of_doi zt doi =
  prerr_endline ("Fetching " ^ doi);
  let v =
    resolve_doi zt doi >>= function
    | Ok r -> Lwt.return r
    | Error (`Msg _) ->
        Printf.eprintf "%s failed on /web, trying to /search\n%!" doi;
        search_id zt doi >>= function
        | Error (`Msg e) -> Lwt.fail_with e
        | Ok r -> Lwt.return r
  in
  v >>= fun v ->
  export zt Bibtex v >>= function
  | Error (`Msg e) -> Lwt.fail_with e
  | Ok r ->
      print_endline r;
      Lwt.return r

let split_authors keys =
  let authors =
    List.assoc "author" keys |> J.get_string |>
    Astring.String.cuts ~empty:false ~sep:" and " |>
    List.map Bibtex.list_value |>
    List.map (fun v -> List.rev v |> String.concat " ") |>
    List.map (fun x -> `String x)
  in
  let keywords =
    List.assoc_opt "keywords" keys |> function
    | None -> []
    | Some k ->
        Astring.String.cuts ~empty:false ~sep:", " (J.get_string k) |>
        List.map (fun x -> `String x)
  in
  J.update (`O keys) ["author"] (Some (`A authors)) |> fun j ->
  J.update j ["keywords"] (match keywords with [] -> None | _ -> Some (`A keywords))

let add_bibtex ~slug y =
  let (.%{}) = fun y k -> J.find y [k] in
  let add_if_present k f m =
    match J.find y [k] with
    | v -> Bibtex.SM.add k (f v) m
    | exception Not_found -> m
  in
  let string k m = add_if_present k J.get_string m in
  let authors m =
    add_if_present "author" (fun j -> J.get_list J.get_string j |> String.concat " and ") m
  in
  let cite_key = Astring.String.map (function '-' -> '_' | x -> x) slug in
  let fields = Bibtex.SM.empty in
  let type' = y.%{"bibtype"} |> J.get_string |> String.lowercase_ascii in
  let fields =
    authors fields |> string "title" |> string "doi" |> string "month"
    |> string "year" |> string "url"
  in
  let fields = match type' with
  | "article" -> string "journal" fields |> string "volume" |> string "number" |> string "pages"
  | "inproceedings" | "incollection" ->
      string "booktitle" fields |> string "editor" |> string "address" |> string "series"
      |> string "number" |> string "volume" |> string "organization" |> string "publisher"
      |> string "pages"
  | "book" -> string "editor" fields |> string "publisher" |> string "volume" |> string "pages"
  | "misc" -> string "howpublished" fields
  | "techreport" -> string "institution" fields |> string "number" |> string "address"
  | b -> prerr_endline ("unknown bibtype " ^ b); fields
  in
  Bibtex.v ~type' ~cite_key ~fields () |> Fmt.str "%a" Bibtex.pp |> fun bib ->
  J.update y ["bib"] (Some (`String bib))

let json_of_doi zt ~slug doi =
  bib_of_doi zt doi >>= fun x ->
  fields_of_bib x >>= fun x ->
  Lwt.return (split_authors x |> add_bibtex ~slug)
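
The author handling above is the fiddly bit: the exported BibTeX has "Last, First and Last, First" style names, and split_authors turns that into a JSON array of "First Last" strings. These helpers aren't exported, so this is just the intent, traced by hand:

(* split_authors [ "author", `String "Doe, Jane and Roe, Richard" ]
   should yield an object whose "author" member is
   `A [ `String "Jane Doe"; `String "Richard Roe" ]. *)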

stack/zotero-translation/zotero_translation.mli (+37)

(** {1 Interface to the Zotero Translation Server} *)

type t

type format =
  | Bibtex
  | Biblatex
  | Bookmarks
  | Coins
  | Csljson
  | Csv
  | Endnote_xml
  | Evernote
  | Mods
  | Rdf_bibliontology
  | Rdf_dc
  | Rdf_zotero
  | Refer
  | Refworks_tagged
  | Ris
  | Tei
  | Wikipedia

val format_to_string: format -> string
val format_of_string: string -> format option

val v : string -> t

val resolve_doi: t -> string -> ([>Ezjsonm.t], [>`Msg of string]) Lwt_result.t

val resolve_url: t -> string -> ([>Ezjsonm.t], [>`Msg of string]) Lwt_result.t

val search_id: t -> string -> ([>Ezjsonm.t], [>`Msg of string]) Lwt_result.t

val export: t -> format -> Ezjsonm.t -> (string, [>`Msg of string]) Lwt_result.t

val json_of_doi : t -> slug:string -> string -> Ezjsonm.value Lwt.t
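
For reference, a sketch of the lower-level calls that json_of_doi composes (no /search fallback here, and errors are just collapsed into Lwt.fail_with):

let fetch_bibtex zt doi =
  let open Lwt.Infix in
  Zotero_translation.resolve_doi zt doi >>= function
  | Error (`Msg m) -> Lwt.fail_with m
  | Ok item ->
      Zotero_translation.export zt Zotero_translation.Bibtex item >>= function
      | Error (`Msg m) -> Lwt.fail_with m
      | Ok bib -> Lwt.return bib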