Assorted shell and Python scripts
#!/usr/bin/env bash

# This script tests the quality of responses generated by the gemma3:27b
# LLM for basic knowledge questions.
#
# It requires an Ollama server with the gemma3:27b model available, either
# set via `export OLLAMA_SERVER=...` or defaulting to localhost:11434.
#
# Dependencies:
# - curl
# - gum
# - glow
# - jq
# - ollama

set -euo pipefail

OLLAMA_SERVER="${OLLAMA_SERVER:-localhost:11434}"

# Prompt for the question, then echo it back in a styled box.
MESSAGE=$(gum write --placeholder="Ask Gemma3...")

gum style --padding="1 2" --border-foreground="#a6e3a1" --border="normal" "$MESSAGE"

# Build the request body with jq so quotes and newlines in the message are
# escaped correctly, rather than splicing $MESSAGE into the JSON by hand
# (which breaks on any input containing a double quote).
PAYLOAD=$(jq -n --arg content "$MESSAGE" '{
  model: "gemma3:27b",
  messages: [{role: "user", content: $content}],
  stream: false
}')

RESPONSE=$(gum spin -s "dot" --title="Generating response..." --show-output -- \
  curl -s -S -H "Content-Type: application/json" -X POST \
  -d "$PAYLOAD" "http://${OLLAMA_SERVER}/api/chat")

# Extract the assistant's reply from the response and render it as Markdown.
printf '%s' "$RESPONSE" | jq -r '.message.content' | glow -p
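For context on the final jq filter: with "stream": false, Ollama's /api/chat endpoint returns a single JSON object whose reply text sits under .message.content. A trimmed sketch of the shape (field values are illustrative, not captured output):

  {
    "model": "gemma3:27b",
    "created_at": "...",
    "message": {
      "role": "assistant",
      "content": "The generated answer..."
    },
    "done": true
  }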
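A minimal invocation against a remote Ollama host might look like the following; both the script name ask-gemma.sh and the hostname are placeholders, not names from this repo:

  export OLLAMA_SERVER=ollama.example.com:11434
  ./ask-gemma.sh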