Commit

Update cli.py

[ UPDATE ] v1.2.1

## Updated

- Expanded the CLI with options for audio mode, system instructions, and JSON output, giving users finer control over API interaction from the command line.

## Added

- New command-line options (-a/--audio, -m/--media, -js/--json, -sp/--system_prompt) exposing the added functionality, so that every toolkit feature is reachable from the command-line interface; see the usage sketch below.
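
As a rough illustration (not part of the commit), the sketch below shows how the new flags parse with argparse. The flag names and help strings are taken from the diff further down; the example values and the hand-built argument list are made up.

```python
# Standalone sketch (not the toolkit's code): the flags added in this commit,
# parsed with argparse. Example values are illustrative only.
import argparse

parser = argparse.ArgumentParser(prog='cli.py')
parser.add_argument('-a', '--audio', action='store_true', help='Enable audio mode')
parser.add_argument('-p', '--prompt', type=str, help='User prompt', metavar='')
parser.add_argument('-m', '--media', type=str, help='Media file path or url', metavar='')
parser.add_argument('-js', '--json', action='store_true', help='Enable JSON output')
parser.add_argument('-sp', '--system_prompt', type=str, help='System prompt (instructions) for model', metavar='')

# Roughly equivalent to: python cli.py --audio --media "clip.mp3" --prompt "Summarize this recording." --json
args = parser.parse_args(['--audio', '--media', 'clip.mp3',
                          '--prompt', 'Summarize this recording.', '--json'])
print(args.audio, args.media, args.json)  # -> True clip.mp3 True
```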
RMNCLDYO committed Apr 18, 2024
1 parent dcd7fc1 commit 0b02d2f
Showing 1 changed file with 21 additions and 13 deletions.
cli.py (34 changes: 21 additions & 13 deletions)
@@ -1,5 +1,5 @@
 import argparse
-from gemini import Chat, Text, Vision
+from gemini import Chat, Text, Vision, Audio

 def main():
     class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
@@ -10,7 +10,7 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
 ------------------------------------------------------------------
 Gemini AI Toolkit
 API Wrapper & Command-line Interface
-[v1.2.0] by @rmncldyo
+[v1.2.1] by @rmncldyo
 ------------------------------------------------------------------
 Gemini AI Toolkit is an API wrapper and command-line interface for Google's latest Gemini Pro and Gemini Ultra large language models.
@@ -20,11 +20,14 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
 | -c, --chat | Enable chat mode | --chat |
 | -t, --text | Enable text mode | --text |
 | -v, --vision | Enable vision mode | --vision |
+| -a, --audio | Enable audio mode | --audio |
 | -p, --prompt | User prompt | --prompt "Write a story about a magic backpack." |
-| -i, --image | Image file path or url | --image "path_or_url_goes_here" |
-| -a, --api_key | Gemini API key for authentication | --api_key "api_key_goes_here" |
-| -m, --model | The model you would like to use | --model "model_name_goes_here" |
-| -s, --stream | Enable streaming mode for responses | --stream |
+| -m, --media | Media file path or url | --media "path_to_media_file" |
+| -s, --stream | Enable streaming output | --stream |
+| -js, --json | Enable JSON output | --json |
+| -ak, --api_key | Gemini API key for authentication | --api_key "api_key_goes_here" |
+| -md, --model | The model you would like to use | --model "model_name_goes_here" |
+| -sp, --system_prompt | System prompt (instructions) for model | --system_prompt "Write a story about a magic backpack." |
 | -mt, --max_tokens | Maximum number of tokens to generate | --max_tokens 1024 |
 | -tm, --temperature | Sampling temperature | --temperature 0.7 |
 | -tp, --top_p | Nucleus sampling threshold | --top_p 0.9 |
@@ -40,11 +43,14 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
     parser.add_argument('-c', '--chat', action='store_true', help='Enable chat mode')
     parser.add_argument('-t', '--text', action='store_true', help='Enable text mode')
     parser.add_argument('-v', '--vision', action='store_true', help='Enable vision mode')
+    parser.add_argument('-a', '--audio', action='store_true', help='Enable audio mode')
     parser.add_argument('-p', '--prompt', type=str, help='Text or Vision prompt', metavar='')
-    parser.add_argument('-i', '--image', type=str, help='Image file path or url', metavar='')
-    parser.add_argument('-a', '--api_key', type=str, help='Gemini API key for authentication', metavar='')
-    parser.add_argument('-m', '--model', type=str, default='gemini-1.0-pro-latest', help='The model you would like to use', metavar='')
-    parser.add_argument('-s', '--stream', action='store_true', help='Enable streaming mode for responses')
+    parser.add_argument('-m', '--media', type=str, help='Media file path or url', metavar='')
+    parser.add_argument('-s', '--stream', action='store_true', help='Enable streaming output')
+    parser.add_argument('-js', '--json', action='store_true', help='Enable JSON output')
+    parser.add_argument('-ak', '--api_key', type=str, help='Gemini API key for authentication', metavar='')
+    parser.add_argument('-md', '--model', type=str, default='gemini-1.0-pro-latest', help='The model you would like to use', metavar='')
+    parser.add_argument('-sp', '--system_prompt', type=str, help='System prompt (instructions) for model', metavar='')
     parser.add_argument('-mt', '--max_tokens', type=int, help='Maximum number of tokens to generate', metavar='')
     parser.add_argument('-tm', '--temperature', type=float, help='Sampling temperature', metavar='')
     parser.add_argument('-tp', '--top_p', type=float, help='Nucleus sampling threshold', metavar='')
@@ -57,11 +63,13 @@ class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
     args = parser.parse_args()

     if args.chat:
-        Chat().run(args.api_key, args.model, args.prompt, args.max_tokens, args.temperature, args.top_p, args.top_k, args.candidate_count, args.stop_sequences, args.saftey_categories, args.saftey_thresholds, args.stream)
+        Chat().run(args.api_key, args.model, args.prompt, args.stream, args.json, args.system_prompt, args.max_tokens, args.temperature, args.top_p, args.top_k, args.candidate_count, args.stop_sequences, args.saftey_categories, args.saftey_thresholds)
     elif args.text:
-        Text().run(args.api_key, args.model, args.prompt, args.max_tokens, args.temperature, args.top_p, args.top_k, args.candidate_count, args.stop_sequences, args.saftey_categories, args.saftey_thresholds, args.stream)
+        Text().run(args.api_key, args.model, args.prompt, args.stream, args.json, args.system_prompt, args.max_tokens, args.temperature, args.top_p, args.top_k, args.candidate_count, args.stop_sequences, args.saftey_categories, args.saftey_thresholds)
     elif args.vision:
-        Vision().run(args.api_key, args.model, args.prompt, args.image, args.max_tokens, args.temperature, args.top_p, args.top_k, args.candidate_count, args.stop_sequences, args.saftey_categories, args.saftey_thresholds, args.stream)
+        Vision().run(args.api_key, args.model, args.prompt, args.media, args.stream, args.json, args.system_prompt, args.max_tokens, args.temperature, args.top_p, args.top_k, args.candidate_count, args.stop_sequences, args.saftey_categories, args.saftey_thresholds)
+    elif args.audio:
+        Audio().run(args.api_key, args.model, args.prompt, args.media, args.stream, args.json, args.system_prompt, args.max_tokens, args.temperature, args.top_p, args.top_k, args.candidate_count, args.stop_sequences, args.saftey_categories, args.saftey_thresholds)
     else:
         print("Error: Please specify a mode to use. Use --help for more information.")
         exit()
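
For readers skimming the dispatch above: each mode class is called with the same positional argument order, with Chat and Text simply omitting the media argument. Below is a hypothetical stand-in for one of those classes, purely to make that call order concrete; the real gemini module's signatures may differ, and the parameter names here are guesses inferred from the CLI flags.

```python
# Hypothetical stand-in for gemini.Audio (NOT the toolkit's real implementation):
# it only mirrors the positional order used by cli.py's audio branch above.
class Audio:
    def run(self, api_key, model, prompt, media, stream, json_output,
            system_prompt, max_tokens, temperature, top_p, top_k,
            candidate_count, stop_sequences, safety_categories, safety_thresholds):
        # A real implementation would authenticate with api_key, attach the media
        # file, and call the Gemini API; this stub just echoes what it received.
        print(f"model={model!r} media={media!r} stream={stream} json={json_output}")

# Illustrative call mirroring cli.py's Audio branch (all values are made up):
Audio().run("API_KEY", "gemini-1.0-pro-latest", "Describe this clip.", "clip.mp3",
            False, True, None, 1024, 0.7, 0.9, None, None, None, None, None)
```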
