Assorted shell and Python scripts
#!/usr/bin/env bash

# This script is for testing the quality of generated responses to basic
# knowledge questions from the Gemma3:27b LLM model.
#
# It requires an Ollama server with the gemma3:27b model available, either
# defined by `export OLLAMA_SERVER=` (host:port) or it defaults to
# localhost:11434.
#
# Dependencies:
# - curl
# - gum
# - glow
# - jq
# - ollama

set -euo pipefail

# ':-' expands with a default without the redundant self-assignment that
# ':=' would perform inside an assignment.
OLLAMA_SERVER="${OLLAMA_SERVER:-localhost:11434}"

# Fail early with a clear message if any required tool is missing.
for tool in curl gum glow jq; do
  command -v "$tool" >/dev/null 2>&1 || {
    printf 'error: required dependency "%s" not found in PATH\n' "$tool" >&2
    exit 1
  }
done

MESSAGE=$(gum write --placeholder="Ask Gemma3...")

# Build the request body with jq so quotes, backslashes, and newlines in the
# user's message are JSON-escaped. Splicing $MESSAGE directly into a JSON
# string literal would break on any double quote and is an injection vector.
PAYLOAD=$(jq -n --arg content "$MESSAGE" '{
  model: "gemma3:27b",
  messages: [{role: "user", content: $content}],
  stream: false
}')

# --fail makes curl exit non-zero on HTTP error responses so set -e aborts
# instead of piping an error page into jq below.
RESPONSE=$(gum spin -s "dot" --title="Generating response..." --show-output -- \
  curl -s -S --fail -H "Content-Type: application/json" \
    -X POST -d "$PAYLOAD" "http://${OLLAMA_SERVER}/api/chat")

# Extract the assistant's reply and render its Markdown in the terminal.
printf '%s' "$RESPONSE" | jq -r '.message.content' | glow -p