From 690a441536c67da63779ed85f2fb2387657e1858 Mon Sep 17 00:00:00 2001
From: Vadser
Date: Wed, 14 Aug 2024 12:28:32 +0200
Subject: [PATCH] Rename llama to llama_cpp

---
 app/helpers/model_versions_helper.rb               | 2 +-
 app/javascript/controllers/model_controller.js     | 2 +-
 app/models/model.rb                                | 2 +-
 app/services/executors/{llama.rb => llama_cpp.rb}  | 2 +-
 config/locales/en.yml                              | 2 +-
 spec/factories/models.rb                           | 2 +-
 .../executors/{llama_spec.rb => llama_cpp_spec.rb} | 2 +-
 spec/services/model_executor_spec.rb               | 8 ++++----
 8 files changed, 11 insertions(+), 11 deletions(-)
 rename app/services/executors/{llama.rb => llama_cpp.rb} (70%)
 rename spec/services/executors/{llama_spec.rb => llama_cpp_spec.rb} (97%)

diff --git a/app/helpers/model_versions_helper.rb b/app/helpers/model_versions_helper.rb
index d7088fa..a01bfeb 100644
--- a/app/helpers/model_versions_helper.rb
+++ b/app/helpers/model_versions_helper.rb
@@ -4,7 +4,7 @@ module ModelVersionsHelper
   DEFAULT_CONFIGURATION = {
     'openai' => '{"model":"gpt-3.5-turbo","temperature":0.5}',
     'ollama' => '{"model":"llama3.1"}',
-    'llama' => '{"n_predict":500,"temperature":0.5,"stop":["<|end|>","<|user|>","<|assistant|>","<|endoftext|>","<|system|>"]}'
+    'llama_cpp' => '{"n_predict":500,"temperature":0.5,"stop":["<|end|>","<|user|>","<|assistant|>","<|endoftext|>","<|system|>"]}'
   }.freeze
 
   def default_configuration(executor_type)
diff --git a/app/javascript/controllers/model_controller.js b/app/javascript/controllers/model_controller.js
index 24ce0f6..487cce3 100644
--- a/app/javascript/controllers/model_controller.js
+++ b/app/javascript/controllers/model_controller.js
@@ -2,7 +2,7 @@ import { Controller } from "@hotwired/stimulus"
 const DEFAULT_CONFIGURATION = {
   openai: '{"model":"gpt-3.5-turbo","temperature":0.5}',
   ollama: '{"model":"llama3.1"}',
-  llama: '{"n_predict":500,"temperature":0.5,"stop":["<|end|>","<|user|>","<|assistant|>","<|endoftext|>","<|system|>"]}'
+  llama_cpp: '{"n_predict":500,"temperature":0.5,"stop":["<|end|>","<|user|>","<|assistant|>","<|endoftext|>","<|system|>"]}'
 }
 
 export default class extends Controller {
diff --git a/app/models/model.rb b/app/models/model.rb
index db6a726..38a32fb 100644
--- a/app/models/model.rb
+++ b/app/models/model.rb
@@ -7,7 +7,7 @@ class Model < ApplicationRecord
   has_many :model_versions, dependent: :destroy
   accepts_nested_attributes_for :model_versions, allow_destroy: true, reject_if: :all_blank
 
-  enum :executor_type, { llama: 0, openai: 1, ollama: 2 }, scopes: false, default: :llama
+  enum :executor_type, { llama_cpp: 0, openai: 1, ollama: 2 }, scopes: false, default: :llama_cpp
 
   default_scope { order(id: :desc) }
 end
diff --git a/app/services/executors/llama.rb b/app/services/executors/llama_cpp.rb
similarity index 70%
rename from app/services/executors/llama.rb
rename to app/services/executors/llama_cpp.rb
index 77cb6c9..b124047 100644
--- a/app/services/executors/llama.rb
+++ b/app/services/executors/llama_cpp.rb
@@ -1,6 +1,6 @@
 # frozen_string_literal: true
 
 module Executors
-  class Llama < Base
+  class LlamaCpp < Base
   end
 end
diff --git a/config/locales/en.yml b/config/locales/en.yml
index 88ad6e7..b8b1641 100644
--- a/config/locales/en.yml
+++ b/config/locales/en.yml
@@ -33,6 +33,6 @@ en:
       attributes:
         model:
           executor_types:
-            llama: 'Llama.cpp'
+            llama_cpp: 'Llama.cpp'
             openai: 'OpenAI'
             ollama: 'Ollama'
diff --git a/spec/factories/models.rb b/spec/factories/models.rb
index f33ed0e..4d30700 100644
--- a/spec/factories/models.rb
+++ b/spec/factories/models.rb
@@ -4,7 +4,7 @@
   factory :model do
     name { 'Test Model' }
     url { 'http://example.com' }
-    executor_type { 'llama' }
+    executor_type { 'llama_cpp' }
     api_key { 'apikey' }
     association :user
   end
diff --git a/spec/services/executors/llama_spec.rb b/spec/services/executors/llama_cpp_spec.rb
similarity index 97%
rename from spec/services/executors/llama_spec.rb
rename to spec/services/executors/llama_cpp_spec.rb
index ada8c76..c87fef8 100644
--- a/spec/services/executors/llama_spec.rb
+++ b/spec/services/executors/llama_cpp_spec.rb
@@ -3,7 +3,7 @@
 require 'rails_helper'
 require 'webmock/rspec'
 
-RSpec.describe Executors::Llama do
+RSpec.describe Executors::LlamaCpp do
   describe '#call' do
     let(:model) { create(:model, url: 'http://example.com/model_endpoint') }
     let(:model_version) { create(:model_version, model:, configuration: { param: 'value' }) }
diff --git a/spec/services/model_executor_spec.rb b/spec/services/model_executor_spec.rb
index 13ad094..ea505b1 100644
--- a/spec/services/model_executor_spec.rb
+++ b/spec/services/model_executor_spec.rb
@@ -21,14 +21,14 @@
       end
     end
 
-    context "when executor type is 'base'" do
-      let(:model) { create(:model, executor_type: 'llama') }
+    context "when executor type is 'llama_cpp'" do
+      let(:model) { create(:model, executor_type: 'llama_cpp') }
       let(:model_version) { create(:model_version, model:) }
       let(:prompt) { 'Test prompt' }
-      let(:executor_instance) { instance_double(Executors::Base) }
+      let(:executor_instance) { instance_double(Executors::LlamaCpp) }
 
       before do
-        allow(Executors::Base).to receive(:new).and_return(executor_instance)
+        allow(Executors::LlamaCpp).to receive(:new).and_return(executor_instance)
         allow(executor_instance).to receive(:call).with(prompt).and_return({ result: 'Test result' })
       end