#!/usr/bin/env bash
#
# Test the quality of generated responses to basic knowledge questions
# from the Gemma3:27b LLM model.
#
# Requires an Ollama server with the gemma3:27b model available, either
# defined by `export OLLAMA_SERVER=` or it defaults to localhost:11434.
#
# Dependencies:
#   - curl
#   - gum
#   - glow
#   - jq
#   - ollama

set -euo pipefail

OLLAMA_SERVER="${OLLAMA_SERVER:-localhost:11434}"

# Fail fast with a clear message if any required tool is missing,
# instead of a cryptic "command not found" halfway through the run.
for tool in curl gum glow jq; do
  command -v "$tool" >/dev/null 2>&1 || {
    printf 'error: required tool "%s" not found in PATH\n' "$tool" >&2
    exit 1
  }
done

# Prompt the user for a question via gum's multi-line editor.
MESSAGE=$(gum write --placeholder="Ask Gemma3...")

# Echo the question back in a styled box.
gum style --padding="1 2" --border-foreground="#a6e3a1" --border="normal" "$MESSAGE"

# Build the request body with jq so that quotes, backslashes, and
# newlines in MESSAGE are escaped correctly. Interpolating the raw
# string into a JSON literal (the previous approach) produced invalid
# JSON — and was injectable — on any special character.
PAYLOAD=$(jq -n --arg content "$MESSAGE" '{
  model: "gemma3:27b",
  messages: [{role: "user", content: $content}],
  stream: false
}')

# Query the Ollama chat endpoint while showing a spinner.
RESPONSE=$(gum spin -s "dot" --title="Generating response..." --show-output -- \
  curl -s -S -H "Content-Type: application/json" \
    -X POST -d "$PAYLOAD" \
    "http://${OLLAMA_SERVER}/api/chat")

# Ollama reports failures as {"error": "..."}; surface them instead of
# rendering a literal "null" through glow.
if printf '%s' "$RESPONSE" | jq -e 'has("error")' >/dev/null; then
  printf 'error from Ollama: %s\n' \
    "$(printf '%s' "$RESPONSE" | jq -r '.error')" >&2
  exit 1
fi

# Extract the assistant's reply and render it as markdown.
printf '%s' "$RESPONSE" | jq -r '.message.content' | glow -p