OpenAI

Azure OpenAI and OpenAI are among the most popular LLM services today. Although gecholog is LLM agnostic, we thought it would be convenient to show how easily gecholog can be used with the standard openai library for Python.

Chat completion

Set the AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT environment variables, pointing the endpoint at gecholog.

On Windows:

setx AZURE_OPENAI_API_KEY "your_api_key"
setx AZURE_OPENAI_ENDPOINT "http://localhost:5380/service/standard"   # No trailing slash

On Linux/macOS:

export AZURE_OPENAI_API_KEY=your_api_key
export AZURE_OPENAI_ENDPOINT=http://localhost:5380/service/standard   # No trailing slash
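If you want to confirm the variables are visible to Python before running the examples, here is a quick optional check (nothing gecholog-specific, it only reads the environment):

import os

# Both values should be non-empty if the variables were set correctly
print(os.getenv("AZURE_OPENAI_API_KEY") is not None)
print(os.getenv("AZURE_OPENAI_ENDPOINT"))  # e.g. http://localhost:5380/service/standard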


A standard Python example, first using the pre-1.0 (0.x) version of the openai library (just replace the your_deployment part):

import os 
import openai

openai.api_key = os.getenv("AZURE_OPENAI_API_KEY")
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_type = "azure"
openai.api_version = "2023-12-01-preview"  # this might change in the future

response = openai.ChatCompletion.create(
    engine="your_deployment", # The deployment name you chose when you deployed the GPT-35-Turbo or GPT-4 model
    messages=[
        {"role": "system", "content": "Assistant is a large language model trained by OpenAI."},
        {"role": "user", "content": "Who are the founders of Toyota?"}
    ],
    max_tokens=15
)

print(response)
The same request using version 1.x of the openai library:

import os
from openai import AzureOpenAI

client = AzureOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),  
    api_version="2023-12-01-preview", # this might change in the future
    azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
)

response = client.chat.completions.create(
    model="your_deployment", # model = "deployment_name"
    messages=[
        {"role":"system","content":"Assistant is a large language model trained by OpenAI."},
        {"role":"user","content":"Who are the founders of Toyota?"}
    ],
    temperature=0.7,
    max_tokens=15
)

print(response)
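To print only the assistant's reply instead of the full response object, read it from the first choice. A minimal sketch, assuming the response object returned by the 1.x client above:

# The generated message is on the first choice of the response
print(response.choices[0].message.content)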


Completion

import os
import openai

openai.api_type = "azure"
openai.api_version = "2023-12-01-preview"  # this might change in the future
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_key = os.getenv("AZURE_OPENAI_API_KEY")

response = openai.Completion.create(
    engine="your_deployment",  # The deployment name
    prompt="Who were the founders of Microsoft?",
    max_tokens=100,
    temperature=1,
    frequency_penalty=0,
    presence_penalty=0,
    top_p=0.5,
    stop=None
)

print(response)
The same request using version 1.x of the openai library:

import os
from openai import AzureOpenAI

client = AzureOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),  
    api_version="2023-12-01-preview", # this might change in the future
    azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
)

response = client.completions.create(
    model="your_deployment", # model = "deployment_name"
    prompt="Who were the founders of Microsoft?",
    max_tokens=100,
    temperature=1,
    frequency_penalty=0,
    presence_penalty=0,
    top_p=0.5,
    stop=None
)

print(response)
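For the completions endpoint the generated text is available directly on the choice. A minimal sketch, assuming the 1.x response object above:

# Completions expose the generated text on each choice
print(response.choices[0].text)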


Embedding

import os
import openai

openai.api_type = "azure"
openai.api_version = "2023-12-01-preview"  # this might change in the future
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_key = os.getenv("AZURE_OPENAI_API_KEY")

text = 'the quick brown fox jumped over the lazy dog'
response = openai.Embedding.create(
    input=[text], 
    engine='text-embedding-ada-002' # The deployment name
)

print(response)
The same request using version 1.x of the openai library:

import os
from openai import AzureOpenAI

client = AzureOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),  
    api_version="2023-12-01-preview", # this might change in the future
    azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
)

response = client.embeddings.create(
    model="text-embedding-ada-002", # model = "deployment_name"
    input="the quick brown fox jumped over the lazy dog"
)
print(response)
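The embedding vector itself is a list of floats on the first data entry. A minimal sketch, assuming the 1.x response object above:

# Extract the embedding vector and check its dimension
vector = response.data[0].embedding
print(len(vector))  # 1536 dimensions for text-embedding-ada-002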