support Azure API key in Bible Chat
eliranwong committed Dec 24, 2024
1 parent 212791a commit 64330e2
Showing 6 changed files with 71 additions and 17 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -47,7 +47,7 @@
# https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/
setup(
name=package,
version="0.2.11",
version="0.2.13",
python_requires=">=3.8, <3.13",
description=f"UniqueBible App is a cross-platform & offline bible application, integrated with high-quality resources and unique features. Developers: Eliran Wong and Oliver Tseng",
long_description=long_description,
20 changes: 18 additions & 2 deletions uniquebible/__init__.py
@@ -123,15 +123,17 @@ def isServerAlive(ip, port):

# AI Features

-from openai import OpenAI
+from openai import OpenAI, AzureOpenAI
from mistralai import Mistral
from groq import Groq
from typing import Optional
from opencc import OpenCC
import unicodedata, traceback, markdown
from uniquebible.util.BibleVerseParser import BibleVerseParser

config.llm_backends = ["openai", "github", "google", "grok", "groq", "mistral"]
config.llm_backends = ["openai", "github", "azure", "google", "grok", "groq", "mistral"]
# check latest version of azure api at https://learn.microsoft.com/en-us/azure/ai-services/openai/reference
config.azure_api_version = "2024-10-21"

def is_CJK(self, text):
for char in text:
@@ -146,6 +148,8 @@ def isLLMReady(backend=""):
return True
elif backend == "github" and config.githubApi_key:
return True
elif backend == "azure" and config.azureApi_key:
return True
elif backend == "mistral" and config.mistralApi_key:
return True
elif backend == "grok" and config.grokApi_key:
@@ -252,6 +256,18 @@ def getChatResponse(backend, chatMessages) -> Optional[str]:
max_tokens=config.openaiApi_chat_model_max_tokens,
stream=False,
)
elif backend == "azure":
# azure_endpoint should be something like https://<your-resource-name>.openai.azure.com without "/models" at the end
endpoint = re.sub("/models[/]*$", "", config.azureBaseUrl)
azureClient = AzureOpenAI(azure_endpoint=endpoint,api_version=config.azure_api_version,api_key=config.azureApi_key)
completion = azureClient.chat.completions.create(
model=config.openaiApi_chat_model,
messages=chatMessages,
n=1,
temperature=config.openaiApi_llmTemperature,
max_tokens=config.openaiApi_chat_model_max_tokens,
stream=False,
)
elif backend == "grok":
grokClient = OpenAI(
api_key=config.grokApi_key,
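For reference, the new non-streaming Azure path in getChatResponse can be exercised in isolation with a sketch like the one below; the endpoint, API key, and deployment name are hypothetical placeholders, not project defaults:

```python
# Minimal sketch of the Azure branch added to getChatResponse above.
# All credential values here are placeholders.
import re
from openai import AzureOpenAI

# strip a trailing "/models" so the base URL matches what AzureOpenAI expects
endpoint = re.sub("/models[/]*$", "", "https://my-resource.openai.azure.com/models")
client = AzureOpenAI(
    azure_endpoint=endpoint,
    api_version="2024-10-21",   # mirrors config.azure_api_version
    api_key="<AZURE_API_KEY>",
)
completion = client.chat.completions.create(
    model="gpt-4o",             # on Azure, this is the deployment name
    messages=[{"role": "user", "content": "Hello!"}],
    n=1,
    temperature=0.3,
    max_tokens=256,
    stream=False,
)
print(completion.choices[0].message.content)
```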
16 changes: 15 additions & 1 deletion uniquebible/gui/Worker.py
@@ -7,7 +7,7 @@
from pydub import AudioSegment
from pydub.playback import play
from uniquebible.util.VlcUtil import VlcUtil
-from openai import OpenAI
+from openai import OpenAI, AzureOpenAI
from mistralai import Mistral
from groq import Groq

@@ -247,6 +247,18 @@ def getMistralApi_key():
max_tokens=config.openaiApi_chat_model_max_tokens,
stream=True,
)
elif config.llm_backend == "azure":
# azure_endpoint should be something like https://<your-resource-name>.openai.azure.com without "/models" at the end
endpoint = re.sub("/models[/]*$", "", config.azureBaseUrl)
azureClient = AzureOpenAI(azure_endpoint=endpoint,api_version=config.azure_api_version,api_key=config.azureApi_key)
return azureClient.chat.completions.create(
model=config.openaiApi_chat_model,
messages=thisMessage,
n=1,
temperature=config.openaiApi_llmTemperature,
max_tokens=config.openaiApi_chat_model_max_tokens,
stream=True,
)
elif config.llm_backend == "github":
githubClient = OpenAI(
api_key=getGithubApi_key(),
@@ -320,6 +332,8 @@ def getResponse(self, messages, progress_callback, functionJustCalled=False):
progress = event
elif hasattr(event, "data"): # mistralai
progress = event.data.choices[0].delta.content
+                elif not event.choices: # in case of the first event of an Azure completion
+                    continue
else:
progress = event.choices[0].delta.content
# STREAM THE ANSWER
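The empty-choices guard above reflects that an Azure stream's first event can be a content-filter chunk carrying no choices at all; a hedged sketch of a consumer loop built on that assumption:

```python
# Sketch: normalise streamed chunks from the supported backends into text.
# `events` is whatever a backend's chat-completion call returned with stream=True.
def iter_stream_text(events):
    for event in events:
        if isinstance(event, str):        # some backends yield plain strings
            text = event
        elif hasattr(event, "data"):      # mistralai wraps chunks in .data
            text = event.data.choices[0].delta.content
        elif not event.choices:           # Azure may open the stream with an empty-choices event
            continue
        else:
            text = event.choices[0].delta.content
        if text:
            yield text
```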
6 changes: 6 additions & 0 deletions uniquebible/latest_changes.txt
@@ -1,5 +1,11 @@
PIP package:

+0.2.7-0.2.11
+
+* added support for using a FREE GitHub API key
+
+* removed an old AI model from the Bible Chat list
+
0.2.6

* added a running mode `api-client-localhost`
34 changes: 23 additions & 11 deletions uniquebible/plugins/menu/Bible Chat.py
@@ -60,6 +60,8 @@ def __init__(self, parent=None):
self.setWindowTitle(config.thisTranslation["settings"])
if config.llm_backend == "openai":
self.apiKeyEdit = QLineEdit(config.openaiApi_key)
elif config.llm_backend == "azure":
self.apiKeyEdit = QLineEdit(config.azureApi_key)
elif config.llm_backend == "google":
self.apiKeyEdit = QLineEdit(config.googleaiApi_key)
elif config.llm_backend == "mistral":
@@ -76,7 +78,7 @@ def __init__(self, parent=None):
self.apiModelBox = QComboBox()
initialIndex = 0
index = 0
if config.llm_backend in ("openai", "github"):
if config.llm_backend in ("openai", "github", "azure"):
for key in ("gpt-4o", "gpt-4o-mini"):
self.apiModelBox.addItem(key)
if key == config.openaiApi_chat_model:
@@ -101,7 +103,7 @@ def __init__(self, parent=None):
initialIndex = index
index += 1
elif config.llm_backend == "groq":
for key in ("gemma2-9b-it", "gemma-7b-it", "llama-3.1-70b-versatile", "llama-3.1-8b-instant", "llama-3.2-1b-preview", "llama-3.2-3b-preview", "llama-3.2-11b-vision-preview", "llama-3.2-90b-vision-preview", "llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768"):
for key in ("gemma2-9b-it", "gemma-7b-it", "llama-3.3-70b-versatile", "llama-3.1-8b-instant", "llama-3.2-1b-preview", "llama-3.2-3b-preview", "llama-3.2-11b-vision-preview", "llama-3.2-90b-vision-preview", "llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768"):
self.apiModelBox.addItem(key)
if key == config.groqApi_chat_model:
initialIndex = index
@@ -125,7 +127,7 @@ def __init__(self, parent=None):
initialIndex = index
index += 1
self.loadingInternetSearchesBox.setCurrentIndex(initialIndex)
if config.llm_backend in ("openai", "github"):
if config.llm_backend in ("openai", "github", "azure"):
self.maxTokenEdit = QLineEdit(str(config.openaiApi_chat_model_max_tokens))
elif config.llm_backend == "google":
self.maxTokenEdit = QLineEdit(str(config.googleaiApi_chat_model_max_tokens))
@@ -190,6 +192,9 @@ def __init__(self, parent=None):
optional = config.thisTranslation["optional"]
custom = config.thisTranslation["custom"].lower()
layout.addRow(f"{config.llm_backend.capitalize()} API Key [{required}]:", self.apiKeyEdit)
if config.llm_backend == "azure":
self.azureBaseUrl = QLineEdit(config.azureBaseUrl)
layout.addRow(f"Azure endpoint [{required}]:", self.azureBaseUrl)
#layout.addRow(f"Organization ID [{optional}]:", self.orgEdit)
layout.addRow(f"Chat Model [{required}]:", self.apiModelBox)
layout.addRow(f"Max Token [{required}]:", self.maxTokenEdit)
@@ -488,14 +493,16 @@ def setupUI(self):
self.backends.setCurrentIndex(0)
elif config.llm_backend == "github":
self.backends.setCurrentIndex(1)
elif config.llm_backend == "google":
elif config.llm_backend == "azure":
self.backends.setCurrentIndex(2)
elif config.llm_backend == "grok":
elif config.llm_backend == "google":
self.backends.setCurrentIndex(3)
elif config.llm_backend == "groq":
self.backends.setCurrentIndex(4)
elif config.llm_backend == "mistral":
self.backends.setCurrentIndex(5)
elif config.llm_backend == "grok":
self.backends.setCurrentIndex(6)
        else:
            config.llm_backend = "groq"
            self.backends.setCurrentIndex(4)
@@ -504,7 +511,7 @@ def setupUI(self):
self.fontSize.setCurrentIndex((config.chatGPTFontSize - 1))
self.temperature = QComboBox()
self.temperature.addItems([str(i/10) for i in range(0, 21)])
if config.llm_backend in ("openai", "github"):
if config.llm_backend in ("openai", "github", "azure"):
self.temperature.setCurrentIndex(int(config.openaiApi_llmTemperature * 10))
elif config.llm_backend == "google":
self.temperature.setCurrentIndex(int(config.googleaiApi_llmTemperature * 10))
@@ -716,6 +723,9 @@ def showApiDialog(self):
if result == QDialog.Accepted:
if config.llm_backend == "openai":
config.openaiApi_key = dialog.api_key()
elif config.llm_backend == "azure":
config.azureApi_key = dialog.api_key()
config.azureBaseUrl = dialog.azureBaseUrl.text().strip()
elif config.llm_backend == "google":
config.googleaiApi_key = dialog.api_key()
elif config.llm_backend == "grok":
@@ -747,7 +757,7 @@ def showApiDialog(self):
os.environ["OPENAI_API_KEY"] = config.openaiApi_key
#config.openaiApiOrganization = dialog.org()
try:
if config.llm_backend in ("openai", "github"):
if config.llm_backend in ("openai", "github", "azure"):
config.openaiApi_chat_model_max_tokens = int(dialog.max_token())
if config.openaiApi_chat_model_max_tokens < 20:
config.openaiApi_chat_model_max_tokens = 20
@@ -781,7 +791,7 @@ def showApiDialog(self):
config.chatGPTApiAutoScrolling = dialog.enable_auto_scrolling()
config.runPythonScriptGlobally = dialog.enable_runPythonScriptGlobally()
config.chatAfterFunctionCalled = dialog.enable_chatAfterFunctionCalled()
if config.llm_backend ("openai", "github"):
if config.llm_backend ("openai", "github", "azure"):
config.openaiApi_chat_model = dialog.apiModel()
elif config.llm_backend == "google":
config.googleaiApi_chat_model = dialog.apiModel()
@@ -816,13 +826,15 @@ def updateBackend(self, index):
        elif index == 1:
            config.llm_backend = "github"
        elif index == 2:
-            config.llm_backend = "google"
+            config.llm_backend = "azure"
        elif index == 3:
-            config.llm_backend = "grok"
+            config.llm_backend = "google"
        elif index == 4:
            config.llm_backend = "groq"
        elif index == 5:
            config.llm_backend = "mistral"
+        elif index == 6:
+            config.llm_backend = "grok"

def updateTemperature(self, index):
if config.llm_backend == "mistral":
Expand All @@ -831,7 +843,7 @@ def updateTemperature(self, index):
config.grokApi_llmTemperature = float(index / 10)
elif config.llm_backend == "groq":
config.groqApi_llmTemperature = float(index / 10)
elif config.llm_backend in ("openai", "github"):
elif config.llm_backend in ("openai", "github", "azure"):
config.openaiApi_llmTemperature = float(index / 10)
elif config.llm_backend == "google":
config.googleaiApi_llmTemperature = float(index / 10)
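As a side note, the two if/elif chains above (combo-box index to backend, and backend to index) must stay in lock-step whenever a backend is inserted, which is exactly what this commit had to patch by hand. A hypothetical table-driven refactoring would keep them in one place:

```python
# Hypothetical sketch: derive both mappings from a single ordered list,
# matching the combo-box order used in this commit.
BACKENDS = ["openai", "github", "azure", "google", "groq", "mistral", "grok"]

def backend_for_index(index, default="groq"):
    # combo-box index -> backend name, falling back to the default
    return BACKENDS[index] if 0 <= index < len(BACKENDS) else default

def index_for_backend(backend, default="groq"):
    # backend name -> combo-box index, falling back to the default
    return BACKENDS.index(backend if backend in BACKENDS else default)
```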
10 changes: 8 additions & 2 deletions uniquebible/util/ConfigUtil.py
@@ -279,7 +279,7 @@ def updateModules(module, isInstalled):
"")
setConfig("groqApi_chat_model", """
# Groq Chat Model""",
"llama-3.1-70b-versatile")
"llama-3.3-70b-versatile")
setConfig("groqApi_chat_model_max_tokens", """
# Groq Chat Maximum Output Tokens""",
8000)
@@ -327,7 +327,13 @@ def updateModules(module, isInstalled):
# OpenAI API Keys""",
"")
setConfig("githubApi_key", """
# Github API Keys""",
# Github API Keys""", # either a string or a list of strings
"")
setConfig("azureApi_key", """
# Azure API Key""",
"")
setConfig("azureBaseUrl", """
# Github API inference endpoint""",
"")
setConfig("openaiApi_chat_model", """
# OpenAI Chat Model""",
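Putting the new settings together, enabling the Azure backend amounts to something like the following; every value shown is a placeholder, not a shipped default:

```python
# Hypothetical minimal configuration for the new "azure" backend.
config.llm_backend = "azure"
config.azureApi_key = "<AZURE_API_KEY>"
config.azureBaseUrl = "https://my-resource.openai.azure.com"  # a trailing "/models" is stripped automatically
config.openaiApi_chat_model = "gpt-4o"  # on Azure, the deployment name
config.openaiApi_chat_model_max_tokens = 4096
config.openaiApi_llmTemperature = 0.3
```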
