|
| 1 | +"""Shell completion generation for openai CLI.""" |
| 2 | + |
| 3 | +from __future__ import annotations |
| 4 | + |
import argparse
import sys
from typing import Optional
| 7 | + |
| 8 | + |
# Mapping of shell name -> full completion script text, emitted verbatim by
# ``run``. Keep the command lists below in sync with what the CLI actually
# registers — including the ``completion`` command defined in this module.
COMPLETION_SCRIPTS = {
    "bash": """\
# Bash completion for openai CLI
# Add to ~/.bashrc: source /path/to/openai.bash
# Or copy to /etc/bash_completion.d/openai

_openai_completions()
{
    local cur prev commands
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"

    commands="api tools completion"
    api_commands="chat image audio files models completions fine_tuning"
    tools_commands="fine_tunes"

    case ${COMP_CWORD} in
        1)
            COMPREPLY=( $(compgen -W "${commands}" -- ${cur}) )
            return 0
            ;;
        2)
            case ${prev} in
                api)
                    COMPREPLY=( $(compgen -W "${api_commands}" -- ${cur}) )
                    return 0
                    ;;
                tools)
                    COMPREPLY=( $(compgen -W "${tools_commands}" -- ${cur}) )
                    return 0
                    ;;
                completion)
                    COMPREPLY=( $(compgen -W "bash zsh fish" -- ${cur}) )
                    return 0
                    ;;
            esac
            ;;
        *)
            # Complete model names for --model option
            if [[ ${prev} == "--model" ]]; then
                local models="gpt-4o gpt-4o-mini gpt-4-turbo gpt-4 gpt-3.5-turbo o1 o1-mini o1-pro o3 o3-mini o4-mini dall-e-3 dall-e-2 whisper-1 tts-1 tts-1-hd"
                COMPREPLY=( $(compgen -W "${models}" -- ${cur}) )
                return 0
            fi

            # Complete voice options
            if [[ ${prev} == "--voice" ]]; then
                local voices="alloy echo fable onyx nova shimmer"
                COMPREPLY=( $(compgen -W "${voices}" -- ${cur}) )
                return 0
            fi

            # Complete size options
            if [[ ${prev} == "--size" ]]; then
                local sizes="256x256 512x512 1024x1024 1792x1024 1024x1792"
                COMPREPLY=( $(compgen -W "${sizes}" -- ${cur}) )
                return 0
            fi

            # Default: complete with files
            COMPREPLY=( $(compgen -f -- ${cur}) )
            ;;
    esac
}

complete -F _openai_completions openai
""",
    "zsh": """\
#compdef openai

# Zsh completion for openai CLI
# Add to ~/.zshrc: autoload -Uz compinit && compinit

_openai() {
    local -a commands api_commands tools_commands models voices sizes formats qualities styles purposes

    commands=(
        'api:Direct API calls'
        'tools:Client side tools for convenience'
        'completion:Generate shell completion scripts'
    )

    api_commands=(
        'chat:Chat completions API'
        'image:Image generation API'
        'audio:Audio/speech API'
        'files:File management API'
        'models:Model management API'
        'completions:Text completions API'
        'fine_tuning:Fine-tuning jobs API'
    )

    tools_commands=(
        'fine_tunes:Fine-tuning tools'
    )

    models=(
        'gpt-4o:GPT-4o - Most capable model'
        'gpt-4o-mini:GPT-4o Mini - Fast and efficient'
        'gpt-4-turbo:GPT-4 Turbo - With vision'
        'gpt-4:GPT-4 - Original GPT-4'
        'gpt-3.5-turbo:GPT-3.5 Turbo - Fast and cheap'
        'o1:O1 - Reasoning model'
        'o1-mini:O1 Mini - Fast reasoning'
        'o1-pro:O1 Pro - Enhanced reasoning'
        'o3:O3 - Latest reasoning'
        'o3-mini:O3 Mini - Fast latest reasoning'
        'o4-mini:O4 Mini - Latest fast reasoning'
        'dall-e-3:DALL-E 3 - Image generation'
        'dall-e-2:DALL-E 2 - Image generation'
        'whisper-1:Whisper - Speech to text'
        'tts-1:TTS - Text to speech'
        'tts-1-hd:TTS HD - High quality speech'
    )

    voices=(alloy echo fable onyx nova shimmer)
    sizes=(256x256 512x512 1024x1024 1792x1024 1024x1792)
    formats=(json url b64 json_object text)
    qualities=(standard hd)
    styles=(vivid natural)
    purposes=(fine-tune assistants)

    _arguments -C \\
        '(-h --help)'{-h,--help}'[Show help]' \\
        '(-v --verbose)'{-v,--verbose}'[Set verbosity]' \\
        '(-b --api-base)'{-b,--api-base}'[API base URL]:url:_urls' \\
        '(-k --api-key)'{-k,--api-key}'[API key]:key:' \\
        '(-p --proxy)'{-p,--proxy}'[Proxy]:proxy:' \\
        '(-o --organization)'{-o,--organization}'[Organization]:org:' \\
        '(-V --version)'{-V,--version}'[Show version]' \\
        '1: :->cmd' \\
        '*:: :->args'

    case $state in
        cmd)
            _describe -t commands 'openai command' commands
            ;;
        args)
            case $words[1] in
                api)
                    _arguments '1: :->api_cmd' '*:: :->api_args'
                    case $state in
                        api_cmd)
                            _describe -t commands 'api command' api_commands
                            ;;
                        api_args)
                            case $words[1] in
                                chat)
                                    _arguments \\
                                        '--model[Model]:model:->models' \\
                                        '--messages[Messages JSON]:messages:' \\
                                        '--temperature[Temperature]:temperature:' \\
                                        '--top-p[Top P]:top_p:' \\
                                        '--n[Number]:n:' \\
                                        '--stream[Stream]' \\
                                        '--stop[Stop]:stop:' \\
                                        '--max-tokens[Max tokens]:max_tokens:' \\
                                        '--presence-penalty[Presence penalty]:penalty:' \\
                                        '--frequency-penalty[Frequency penalty]:penalty:' \\
                                        '--user[User ID]:user:' \\
                                        '--response-format[Format]:format:->formats' \\
                                        '--seed[Seed]:seed:' \\
                                        '--tools[Tools JSON]:tools:' \\
                                        '--tool-choice[Tool choice]:choice:'
                                    ;;
                                image)
                                    _arguments \\
                                        '--model[Model]:model:(dall-e-3 dall-e-2)' \\
                                        '--prompt[Prompt]:prompt:' \\
                                        '--size[Size]:size:->sizes' \\
                                        '--quality[Quality]:quality:->qualities' \\
                                        '--response-format[Format]:format:(url b64)' \\
                                        '--style[Style]:style:->styles' \\
                                        '--user[User ID]:user:' \\
                                        '--n[Number]:n:'
                                    ;;
                                audio)
                                    _arguments \\
                                        '--model[Model]:model:(whisper-1 tts-1 tts-1-hd)' \\
                                        '--voice[Voice]:voice:->voices' \\
                                        '--input[Input]:input:_files' \\
                                        '--response-format[Format]:format:(mp3 opus aac flac wav pcm)' \\
                                        '--speed[Speed]:speed:(0.25 0.5 1.0 2.0 4.0)'
                                    ;;
                                files)
                                    _arguments \\
                                        '--file[File]:file:_files' \\
                                        '--purpose[Purpose]:purpose:->purposes'
                                    ;;
                                models)
                                    _arguments \\
                                        '--list[List]' \\
                                        '--retrieve[Retrieve]:model:->models' \\
                                        '--delete[Delete]:model:->models'
                                    ;;
                                completions)
                                    _arguments \\
                                        '--model[Model]:model:->models' \\
                                        '--prompt[Prompt]:prompt:' \\
                                        '--temperature[Temperature]:temperature:' \\
                                        '--max-tokens[Max tokens]:max_tokens:' \\
                                        '--echo[Echo]' \\
                                        '--stream[Stream]' \\
                                        '--stop[Stop]:stop:' \\
                                        '--user[User ID]:user:' \\
                                        '--seed[Seed]:seed:'
                                    ;;
                                fine_tuning)
                                    _arguments \\
                                        '--model[Model]:model:->models' \\
                                        '--training-file[Training file]:file:_files' \\
                                        '--hyperparameters[Hyperparams JSON]:params:' \\
                                        '--suffix[Suffix]:suffix:' \\
                                        '--validation-file[Validation file]:file:_files'
                                    ;;
                            esac
                            ;;
                    esac
                    ;;
                tools)
                    _arguments '1: :->tools_cmd'
                    case $state in
                        tools_cmd)
                            _describe -t commands 'tools command' tools_commands
                            ;;
                    esac
                    ;;
                completion)
                    _arguments '1:shell:(bash zsh fish)' '--output[Output file]:file:_files'
                    ;;
            esac
            ;;
    esac

    case $words[-1] in
        --model|-m) _describe -t models 'model' models ;;
        --voice) _describe -t voices 'voice' voices ;;
        --size) _describe -t sizes 'size' sizes ;;
        --response-format) _describe -t formats 'format' formats ;;
        --quality) _describe -t qualities 'quality' qualities ;;
        --style) _describe -t styles 'style' styles ;;
        --purpose) _describe -t purposes 'purpose' purposes ;;
    esac
}

_openai "$@"
""",
    "fish": """\
# Fish completion for openai CLI
# Add to ~/.config/fish/completions/openai.fish

# Global options
complete -c openai -l verbose -d "Set verbosity" -f
complete -c openai -l api-base -d "API base URL" -f
complete -c openai -l api-key -d "API key" -f
complete -c openai -l proxy -d "Proxy" -f
complete -c openai -l organization -d "Organization" -f
complete -c openai -l api-type -d "API type" -a "openai azure" -f
complete -c openai -l api-version -d "API version" -f
complete -c openai -l azure-endpoint -d "Azure endpoint" -f
complete -c openai -l azure-ad-token -d "Azure AD token" -f
complete -c openai -l version -d "Show version" -f
complete -c openai -l help -d "Show help" -f

# Main commands
complete -c openai -n '__fish_use_subcommand' -a api -d "Direct API calls"
complete -c openai -n '__fish_use_subcommand' -a tools -d "Client side tools"
complete -c openai -n '__fish_use_subcommand' -a completion -d "Generate shell completion scripts"

# API subcommands
complete -c openai -n '__fish_seen_subcommand_from api' -a chat -d "Chat completions"
complete -c openai -n '__fish_seen_subcommand_from api' -a image -d "Image generation"
complete -c openai -n '__fish_seen_subcommand_from api' -a audio -d "Audio/speech"
complete -c openai -n '__fish_seen_subcommand_from api' -a files -d "File management"
complete -c openai -n '__fish_seen_subcommand_from api' -a models -d "Model management"
complete -c openai -n '__fish_seen_subcommand_from api' -a completions -d "Text completions"
complete -c openai -n '__fish_seen_subcommand_from api' -a fine_tuning -d "Fine-tuning jobs"

# Tools subcommands
complete -c openai -n '__fish_seen_subcommand_from tools' -a fine_tunes -d "Fine-tuning tools"

# Completion subcommand
complete -c openai -n '__fish_seen_subcommand_from completion' -a "bash zsh fish" -f
complete -c openai -n '__fish_seen_subcommand_from completion' -l output -d "Output file path"

# Model names
set -l models gpt-4o gpt-4o-mini gpt-4-turbo gpt-4 gpt-3.5-turbo o1 o1-mini o1-pro o3 o3-mini o4-mini dall-e-3 dall-e-2 whisper-1 tts-1 tts-1-hd

# Voice options
set -l voices alloy echo fable onyx nova shimmer

# Size options
set -l sizes 256x256 512x512 1024x1024 1792x1024 1024x1792

# Chat options
complete -c openai -n '__fish_seen_subcommand_from chat' -l model -d "Model" -a "$models" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l messages -d "Messages JSON" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l temperature -d "Temperature" -a "0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l top-p -d "Top P" -a "0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l n -d "Number" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l stream -d "Stream" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l stop -d "Stop" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l max-tokens -d "Max tokens" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l presence-penalty -d "Presence penalty" -a "-2.0 -1.0 0.0 1.0 2.0" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l frequency-penalty -d "Frequency penalty" -a "-2.0 -1.0 0.0 1.0 2.0" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l user -d "User ID" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l response-format -d "Format" -a "json json_object" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l seed -d "Seed" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l tools -d "Tools JSON" -f
complete -c openai -n '__fish_seen_subcommand_from chat' -l tool-choice -d "Tool choice" -f

# Image options
complete -c openai -n '__fish_seen_subcommand_from image' -l model -d "Model" -a "dall-e-3 dall-e-2" -f
complete -c openai -n '__fish_seen_subcommand_from image' -l prompt -d "Prompt" -f
complete -c openai -n '__fish_seen_subcommand_from image' -l size -d "Size" -a "$sizes" -f
complete -c openai -n '__fish_seen_subcommand_from image' -l quality -d "Quality" -a "standard hd" -f
complete -c openai -n '__fish_seen_subcommand_from image' -l response-format -d "Format" -a "url b64" -f
complete -c openai -n '__fish_seen_subcommand_from image' -l style -d "Style" -a "vivid natural" -f
complete -c openai -n '__fish_seen_subcommand_from image' -l user -d "User ID" -f
complete -c openai -n '__fish_seen_subcommand_from image' -l n -d "Number" -f

# Audio options
complete -c openai -n '__fish_seen_subcommand_from audio' -l model -d "Model" -a "whisper-1 tts-1 tts-1-hd" -f
complete -c openai -n '__fish_seen_subcommand_from audio' -l voice -d "Voice" -a "$voices" -f
complete -c openai -n '__fish_seen_subcommand_from audio' -l input -d "Input" -f
complete -c openai -n '__fish_seen_subcommand_from audio' -l response-format -d "Format" -a "mp3 opus aac flac wav pcm" -f
complete -c openai -n '__fish_seen_subcommand_from audio' -l speed -d "Speed" -a "0.25 0.5 1.0 2.0 4.0" -f

# Files options
complete -c openai -n '__fish_seen_subcommand_from files' -l file -d "File" -f
complete -c openai -n '__fish_seen_subcommand_from files' -l purpose -d "Purpose" -a "fine-tune assistants" -f

# Models options
complete -c openai -n '__fish_seen_subcommand_from models' -l list -d "List" -f
complete -c openai -n '__fish_seen_subcommand_from models' -l retrieve -d "Retrieve" -a "$models" -f
complete -c openai -n '__fish_seen_subcommand_from models' -l delete -d "Delete" -a "$models" -f

# Completions options
complete -c openai -n '__fish_seen_subcommand_from completions' -l model -d "Model" -a "$models" -f
complete -c openai -n '__fish_seen_subcommand_from completions' -l prompt -d "Prompt" -f
complete -c openai -n '__fish_seen_subcommand_from completions' -l temperature -d "Temperature" -a "0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0" -f
complete -c openai -n '__fish_seen_subcommand_from completions' -l max-tokens -d "Max tokens" -f
complete -c openai -n '__fish_seen_subcommand_from completions' -l echo -d "Echo" -f
complete -c openai -n '__fish_seen_subcommand_from completions' -l stream -d "Stream" -f
complete -c openai -n '__fish_seen_subcommand_from completions' -l stop -d "Stop" -f
complete -c openai -n '__fish_seen_subcommand_from completions' -l user -d "User ID" -f
complete -c openai -n '__fish_seen_subcommand_from completions' -l seed -d "Seed" -f

# Fine-tuning options
complete -c openai -n '__fish_seen_subcommand_from fine_tuning' -l model -d "Model" -a "$models" -f
complete -c openai -n '__fish_seen_subcommand_from fine_tuning' -l training-file -d "Training file" -f
complete -c openai -n '__fish_seen_subcommand_from fine_tuning' -l hyperparameters -d "Hyperparameters JSON" -f
complete -c openai -n '__fish_seen_subcommand_from fine_tuning' -l suffix -d "Suffix" -f
complete -c openai -n '__fish_seen_subcommand_from fine_tuning' -l validation-file -d "Validation file" -f
""",
}
| 353 | + |
| 354 | + |
def register(parser: argparse._SubParsersAction) -> None:
    """Register the ``completion`` command on the CLI.

    Args:
        parser: The subparsers action returned by ``add_subparsers()``.
            Note: ``add_parser`` exists only on that object, not on a plain
            ``argparse.ArgumentParser`` (the previous annotation was wrong).
    """
    sub = parser.add_parser(
        "completion",
        help="Generate shell completion scripts",
    )
    # Positional: which shell to emit a script for. argparse enforces the
    # choices, so `run` only ever sees one of these three values via the CLI.
    sub.add_argument(
        "shell",
        choices=["bash", "zsh", "fish"],
        help="Shell to generate completion for",
    )
    sub.add_argument(
        "--output",
        "-o",
        help="Output file path (default: stdout)",
    )
    # Dispatch: the main CLI calls args.func(args) after parsing.
    sub.set_defaults(func=run)
| 372 | + |
| 373 | + |
def run(args: argparse.Namespace) -> None:
    """Emit the completion script for ``args.shell``.

    Writes to ``args.output`` when given (and prints setup instructions),
    otherwise prints the script to stdout. ``args.shell`` is already
    constrained by argparse choices; the lookup guard below only protects
    programmatic callers. Requires ``sys`` (used for stderr), which must be
    imported at module level.
    """
    script = COMPLETION_SCRIPTS.get(args.shell)
    if not script:
        print(f"Error: Unsupported shell '{args.shell}'", file=sys.stderr)
        return

    if args.output:
        # Scripts are plain ASCII, but pin the encoding for reproducibility.
        with open(args.output, "w", encoding="utf-8") as f:
            f.write(script)
        print(f"Completion script written to {args.output}")
        print("\nTo enable completions:")
        if args.shell == "bash":
            print(f"  Add to ~/.bashrc: source {args.output}")
        elif args.shell == "zsh":
            # A #compdef file is not meant to be sourced: it must live on
            # $fpath under the name _openai so compinit can autoload it.
            print("  Copy the file to a directory on your $fpath as _openai")
            print("  (e.g. /usr/local/share/zsh/site-functions/_openai),")
            print("  then run: autoload -Uz compinit && compinit")
        elif args.shell == "fish":
            print("  Fish completions are auto-loaded from ~/.config/fish/completions/")
    else:
        print(script)
0 commit comments