Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
83 commits
Select commit Hold shift + click to select a range
37218d9
refactor: update styles and prompt selection logic (#103)
Junyi-99 Jan 26, 2026
d9dbc20
feat: initialize projectInstructions variable in conversation prepara…
Junyi-99 Jan 26, 2026
894532a
refactor: update MessageId handling to remove prefix (#105)
Junyi-99 Jan 26, 2026
be7b8c8
add byok modal, update user proto and model
kah-seng Jan 27, 2026
50a5f0b
feat: gpt-5.2, refactored streaming, improved md rendering, basic too…
Junyi-99 Jan 28, 2026
783f85e
Merge branch 'main' of https://github.com/PaperDebugger/paperdebugger…
kah-seng Jan 28, 2026
9603404
update CustomModel proto, model and mapper
kah-seng Jan 28, 2026
49ac78d
add button functions
kah-seng Jan 29, 2026
3ffbb5e
Update xtramcp tag
4ndrelim Jan 29, 2026
5ea58f1
fix: update openaiApiKey handling to refresh model list and cache (#107)
Junyi-99 Jan 29, 2026
d83983b
Merge branch 'staging' of https://github.com/PaperDebugger/paperdebug…
Junyi-99 Jan 29, 2026
74f44d5
refactor: remove gradient handling logic from GeneralToolCard component
Junyi-99 Jan 29, 2026
262f6b7
feat: implement dark mode support and theme synchronization across co…
Junyi-99 Jan 29, 2026
38d7d56
change CustomModel in user.proto to match SupportedModel in chat.proto
kah-seng Jan 30, 2026
3cb6692
allow custom model to be selected in chat
kah-seng Jan 30, 2026
83efbac
Merge branch 'main' into feat/byok
kah-seng Jan 30, 2026
49dd47f
fix: text patch on multi-message
Junyi-99 Jan 31, 2026
07321c9
fix: css
Junyi-99 Jan 31, 2026
3318812
fix: style
Junyi-99 Jan 31, 2026
c2d221e
fix: style
Junyi-99 Jan 31, 2026
b055a17
add: link to report bugs
Junyi-99 Jan 31, 2026
f4e4634
chore: format & lint
Junyi-99 Jan 31, 2026
e0d5808
docs: update qr code
Junyi-99 Jan 31, 2026
9259e76
Merge branch 'main' into feat/byok
kah-seng Feb 3, 2026
6333acc
add select input, update model selection
kah-seng Feb 3, 2026
f693cf4
Merge branch 'main' into feat/byok
kah-seng Feb 14, 2026
adf8216
todo: fix gemini bad request
kah-seng Feb 15, 2026
6ef12a3
feat: tab completion for citation keys (#110)
wjiayis Feb 17, 2026
dab4e6f
chore: update helm for xtramcp (#119)
wjiayis Feb 17, 2026
ea701a5
chore: merge branch 'main' into staging (#122)
wjiayis Feb 17, 2026
6fcdbee
refactor: improve state management and accessibility in components
Junyi-99 Feb 18, 2026
ee2cb93
refactor: improve state management and accessibility in components (#…
Junyi-99 Feb 18, 2026
f19e901
Merge branch 'staging' of https://github.com/PaperDebugger/paperdebug…
Junyi-99 Feb 18, 2026
19269cc
refactor: enhance accessibility and code consistency across components
Junyi-99 Feb 18, 2026
375c342
refactor: update styles and prompt selection logic (#103) (#124)
Junyi-99 Feb 18, 2026
44dd174
Merge branch 'staging' of https://github.com/PaperDebugger/paperdebug…
Junyi-99 Feb 18, 2026
2e593f7
Merge remote-tracking branch 'origin/main' into staging
Junyi-99 Feb 18, 2026
ebbfb44
fix gemini chat params
kah-seng Feb 19, 2026
a7b7ff7
Update llmProvider
kah-seng Feb 21, 2026
110bcaf
Polish UI
kah-seng Feb 21, 2026
58e9aac
Fix defaults
kah-seng Feb 21, 2026
bee8db5
feat: user cost (#126)
wjiayis Mar 11, 2026
f807eaf
Change slug to text input, hide disabled models
kah-seng Mar 18, 2026
035e6da
Allow multiple models with same slugs
kah-seng Mar 18, 2026
c9b078a
Trim inputs, polish UI
kah-seng Mar 19, 2026
6981907
UI polish, do not send disabled models
kah-seng Mar 19, 2026
b6975ef
Merge branch 'staging' into feat/byok
kah-seng Mar 19, 2026
6031234
Make gen
kah-seng Mar 19, 2026
8bf9095
Update UI
kah-seng Mar 19, 2026
167416b
Add input validation error indicator
kah-seng Mar 20, 2026
d107f62
Remove sorting of models by name
kah-seng Mar 20, 2026
6242602
Add optional fields
kah-seng Mar 20, 2026
18a5557
Revert package-lock.json
kah-seng Mar 20, 2026
dc587af
Add baseUrl https validation
kah-seng Mar 20, 2026
adb9032
fix: revert last change staging (#135)
wjiayis Mar 22, 2026
bfeac21
Merge branch 'staging' into feat/byok
kah-seng Mar 26, 2026
8c11e21
make gen
kah-seng Mar 26, 2026
0022664
Resolve Copilot comments
kah-seng Mar 26, 2026
54514a8
Merge pull request #129 from PaperDebugger/feat/byok
kah-seng Mar 26, 2026
2057e72
chore: merge main into staging: add nodeSelector support (#137)
Junyi-99 Mar 28, 2026
041d463
refactor: deduplicate CI workflows and fix double triggers (#138)
Junyi-99 Mar 28, 2026
95163e8
chore: enable in-cluster mongodb for dev and stg (#139)
Junyi-99 Mar 28, 2026
572b57d
docs: add contributing instructions (#127) (#142)
Junyi-99 Mar 28, 2026
89a066b
fix: set PD_MONGO_URI for in-cluster mongo and enable in_cluster (#145)
Junyi-99 Mar 28, 2026
56cbe48
fix: add permissions to backend caller workflows (#146)
Junyi-99 Mar 28, 2026
acf9653
Add customModels to setting-store.ts
kah-seng Apr 6, 2026
3f6871c
Select models by ID instead of slug
kah-seng Apr 6, 2026
0f9dd22
Add loading spinner
kah-seng Apr 6, 2026
2ff12f9
Fix Copilot comments
kah-seng Apr 6, 2026
56d815e
fix: BYOK (#150)
kah-seng Apr 6, 2026
6508989
Show error when same slug and name
kah-seng Apr 7, 2026
7eca4d7
Add tooltips
kah-seng Apr 8, 2026
23ea667
Add temp, parallel tools, store fields
kah-seng Apr 8, 2026
4cad7c9
Add tooltips
kah-seng Apr 9, 2026
04eb9fe
Allow user to configure temp, parallel tools, store params
kah-seng Apr 10, 2026
2a0bd5f
Add tooltips
kah-seng Apr 10, 2026
4774fbe
Merge branch 'staging' into feat/byok
kah-seng Apr 10, 2026
549b5fb
Fix Copilot comments
kah-seng Apr 10, 2026
b7b620a
Add divider in model selection, change save/edit/delete icons to text
kah-seng Apr 19, 2026
30f526d
Add tooltip to BYOK in settings tab
kah-seng Apr 19, 2026
acae564
Underline custom model label in selection
kah-seng Apr 19, 2026
5cca666
Merge branch 'main' into feat/byok
kah-seng Apr 19, 2026
666df1f
Revert hack yaml files
kah-seng Apr 21, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 45 additions & 5 deletions internal/api/chat/create_conversation_message_stream_v2.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,14 @@ package chat

import (
"context"
"fmt"
"paperdebugger/internal/api/mapper"
"paperdebugger/internal/libs/contextutil"
"paperdebugger/internal/libs/shared"
"paperdebugger/internal/models"
"paperdebugger/internal/services"
chatv2 "paperdebugger/pkg/gen/api/chat/v2"
"strings"

"github.com/google/uuid"
"github.com/openai/openai-go/v3"
Expand Down Expand Up @@ -276,12 +278,50 @@ func (s *ChatServerV2) CreateConversationMessageStream(
return s.sendStreamError(stream, err)
}

// Usage is the same as ChatCompletion, just passing the stream parameter
llmProvider := &models.LLMProviderConfig{
APIKey: settings.OpenAIAPIKey,
// Check if user has an API key for requested model
var llmProvider *models.LLMProviderConfig
var customModel *models.CustomModel
customModel = nil

customModelID := req.GetCustomModelId()
if customModelID != "" {
for i := range settings.CustomModels {
if settings.CustomModels[i].Id.Hex() == customModelID {
customModel = &settings.CustomModels[i]
break
}
}
if customModel == nil {
return s.sendStreamError(stream, fmt.Errorf("custom model not found: %q", customModelID))
}
modelSlug = customModel.Slug
}

if customModel == nil {
// User did not specify API key for this model
llmProvider = &models.LLMProviderConfig{
APIKey: "",
IsCustomModel: false,
}
} else {
customModel.BaseUrl = strings.ToLower(customModel.BaseUrl)

if strings.Contains(customModel.BaseUrl, "paperdebugger.com") {
customModel.BaseUrl = ""
}
if !strings.HasPrefix(customModel.BaseUrl, "https://") {
customModel.BaseUrl = strings.Replace(customModel.BaseUrl, "http://", "", 1)
customModel.BaseUrl = "https://" + customModel.BaseUrl
}

llmProvider = &models.LLMProviderConfig{
APIKey: customModel.APIKey,
Endpoint: customModel.BaseUrl,
IsCustomModel: true,
}
}

openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistoryCompletion, llmProvider)
openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistoryCompletion, llmProvider, customModel)
if err != nil {
return s.sendStreamError(stream, err)
}
Expand All @@ -307,7 +347,7 @@ func (s *ChatServerV2) CreateConversationMessageStream(
for i, bsonMsg := range conversation.InappChatHistory {
protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg)
}
title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider)
title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider, modelSlug, customModel)
if err != nil {
s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex())
return
Expand Down
34 changes: 18 additions & 16 deletions internal/api/chat/list_supported_models_v2.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ package chat

import (
"context"
"strings"

"paperdebugger/internal/libs/contextutil"
chatv2 "paperdebugger/pkg/gen/api/chat/v2"
Expand Down Expand Up @@ -220,32 +219,35 @@ func (s *ChatServerV2) ListSupportedModels(
return nil, err
}

hasOwnAPIKey := strings.TrimSpace(settings.OpenAIAPIKey) != ""

var models []*chatv2.SupportedModel
for _, config := range allModels {
// Choose the appropriate slug based on whether user has their own API key.
//
// Some models are only available via OpenRouter; for those, slugOpenAI may be empty.
// In that case, keep using the OpenRouter slug to avoid returning an empty model slug.
slug := config.slugOpenRouter
if hasOwnAPIKey && strings.TrimSpace(config.slugOpenAI) != "" {
slug = config.slugOpenAI
}

for _, model := range settings.CustomModels {
modelID := model.Id.Hex()
models = append(models, &chatv2.SupportedModel{
Id: &modelID,
Name: model.Name,
Slug: model.Slug,
TotalContext: int64(model.ContextWindow),
MaxOutput: int64(model.MaxOutput),
InputPrice: int64(model.InputPrice),
OutputPrice: int64(model.OutputPrice),
IsCustom: true,
})
}

for _, config := range allModels {
model := &chatv2.SupportedModel{
Name: config.name,
Slug: slug,
Slug: config.slugOpenRouter,
TotalContext: config.totalContext,
MaxOutput: config.maxOutput,
InputPrice: config.inputPrice,
OutputPrice: config.outputPrice,
}

// If model requires own key but user hasn't provided one, mark as disabled
if config.requireOwnKey && !hasOwnAPIKey {
model.Disabled = true
model.DisabledReason = stringPtr("Requires your own OpenAI API key. Configure it in Settings.")
if config.requireOwnKey {
continue
}

models = append(models, model)
Expand Down
53 changes: 51 additions & 2 deletions internal/api/mapper/user.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,26 +3,75 @@ package mapper
import (
"paperdebugger/internal/models"
userv1 "paperdebugger/pkg/gen/api/user/v1"

"go.mongodb.org/mongo-driver/v2/bson"
)

// MapProtoSettingsToModel converts a protobuf Settings message into the
// internal models.Settings representation, including the user's custom
// ("bring your own key") LLM model configurations.
func MapProtoSettingsToModel(settings *userv1.Settings) *models.Settings {
	// Map the slice of custom models.
	customModels := make([]models.CustomModel, len(settings.CustomModels))
	for i, m := range settings.CustomModels {
		// Reuse the ObjectID carried by the proto when it is valid hex;
		// otherwise mint a fresh one (e.g. for newly created models).
		id, err := bson.ObjectIDFromHex(m.Id)
		if err != nil {
			id = bson.NewObjectID()
		}

		customModels[i] = models.CustomModel{
			Id:                id,
			Slug:              m.Slug,
			Name:              m.Name,
			BaseUrl:           m.BaseUrl,
			APIKey:            m.ApiKey,
			ContextWindow:     m.ContextWindow,
			MaxOutput:         m.MaxOutput,
			InputPrice:        m.InputPrice,
			OutputPrice:       m.OutputPrice,
			Temperature:       m.Temperature,
			ParallelToolCalls: m.ParallelToolCalls,
			Store:             m.Store,
		}
	}

	return &models.Settings{
		ShowShortcutsAfterSelection:  settings.ShowShortcutsAfterSelection,
		FullWidthPaperDebuggerButton: settings.FullWidthPaperDebuggerButton,
		EnableCitationSuggestion:     settings.EnableCitationSuggestion,
		FullDocumentRag:              settings.FullDocumentRag,
		ShowedOnboarding:             settings.ShowedOnboarding,
		OpenAIAPIKey:                 settings.OpenaiApiKey,
		CustomModels:                 customModels,
	}
}

// MapModelSettingsToProto converts the internal models.Settings back into its
// protobuf representation for transport to the client.
func MapModelSettingsToProto(settings *models.Settings) *userv1.Settings {
	// Map the custom models slice back to proto messages.
	customModels := make([]*userv1.CustomModel, len(settings.CustomModels))
	for i, m := range settings.CustomModels {
		customModels[i] = &userv1.CustomModel{
			Id:                m.Id.Hex(),
			Slug:              m.Slug,
			Name:              m.Name,
			BaseUrl:           m.BaseUrl,
			ApiKey:            m.APIKey,
			ContextWindow:     m.ContextWindow,
			MaxOutput:         m.MaxOutput,
			InputPrice:        m.InputPrice,
			OutputPrice:       m.OutputPrice,
			Temperature:       m.Temperature,
			ParallelToolCalls: m.ParallelToolCalls,
			Store:             m.Store,
		}
	}

	return &userv1.Settings{
		ShowShortcutsAfterSelection:  settings.ShowShortcutsAfterSelection,
		FullWidthPaperDebuggerButton: settings.FullWidthPaperDebuggerButton,
		EnableCitationSuggestion:     settings.EnableCitationSuggestion,
		FullDocumentRag:              settings.FullDocumentRag,
		ShowedOnboarding:             settings.ShowedOnboarding,
		OpenaiApiKey:                 settings.OpenAIAPIKey,
		CustomModels:                 customModels,
	}
}
9 changes: 6 additions & 3 deletions internal/models/llm_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,13 @@ package models

// LLMProviderConfig holds the configuration for LLM API calls.
// If both Endpoint and APIKey are empty, the system default will be used.
// If IsCustomModel is true, the user-requested slug with corresponding
// API keys and endpoint should be used.
type LLMProviderConfig struct {
	Endpoint  string // base URL of the provider; empty means "use system default"
	APIKey    string // API key; empty means "use the system inference key"
	ModelName string // NOTE(review): uniqueness across same-slug keys is enforced on the frontend only — TODO confirm backend guarantee
	// IsCustomModel marks configurations built from a user-defined CustomModel
	// entry rather than the built-in provider routing.
	IsCustomModel bool
}

// IsCustom returns true if the user has configured custom LLM provider settings.
Expand Down
28 changes: 22 additions & 6 deletions internal/models/user.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,29 @@ package models

import "go.mongodb.org/mongo-driver/v2/bson"

// CustomModel describes a user-supplied ("bring your own key") LLM model
// configuration persisted inside the user's settings document.
type CustomModel struct {
	Id                bson.ObjectID `bson:"_id"`                 // unique identifier for this model entry
	Slug              string        `bson:"slug"`                // provider model slug sent with API requests
	Name              string        `bson:"name"`                // display name chosen by the user
	BaseUrl           string        `bson:"base_url"`            // provider endpoint base URL
	APIKey            string        `bson:"api_key"`             // user's API key for this provider
	ContextWindow     int32         `bson:"context_window"`      // context window size; presumably tokens — TODO confirm
	MaxOutput         int32         `bson:"max_output"`          // maximum output size; presumably tokens — TODO confirm
	InputPrice        int32         `bson:"input_price"`         // input price; units not shown here — TODO confirm
	OutputPrice       int32         `bson:"output_price"`        // output price; units not shown here — TODO confirm
	Temperature       float32       `bson:"temperature"`         // sampling temperature passed to the provider
	ParallelToolCalls bool          `bson:"parallel_tool_calls"` // whether parallel tool calls are requested
	Store             bool          `bson:"store"`               // provider-side "store" parameter flag
}

type Settings struct {
ShowShortcutsAfterSelection bool `bson:"show_shortcuts_after_selection"`
FullWidthPaperDebuggerButton bool `bson:"full_width_paper_debugger_button"`
EnableCitationSuggestion bool `bson:"enable_citation_suggestion"`
FullDocumentRag bool `bson:"full_document_rag"`
ShowedOnboarding bool `bson:"showed_onboarding"`
OpenAIAPIKey string `bson:"openai_api_key"`
ShowShortcutsAfterSelection bool `bson:"show_shortcuts_after_selection"`
FullWidthPaperDebuggerButton bool `bson:"full_width_paper_debugger_button"`
EnableCitationSuggestion bool `bson:"enable_citation_suggestion"`
FullDocumentRag bool `bson:"full_document_rag"`
ShowedOnboarding bool `bson:"showed_onboarding"`
OpenAIAPIKey string `bson:"openai_api_key"`
CustomModels []CustomModel `bson:"custom_models"`
}

type User struct {
Expand Down
22 changes: 12 additions & 10 deletions internal/services/toolkit/client/client_v2.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,18 +32,20 @@ func (a *AIClientV2) GetOpenAIClient(llmConfig *models.LLMProviderConfig) *opena
var Endpoint string = llmConfig.Endpoint
var APIKey string = llmConfig.APIKey

if Endpoint == "" {
if APIKey != "" {
// User provided their own API key, use the OpenAI-compatible endpoint
Endpoint = a.cfg.OpenAIBaseURL // standard openai base url
} else {
// suffix needed for cloudflare gateway
Endpoint = a.cfg.InferenceBaseURL + "/openrouter"
if !llmConfig.IsCustomModel {
if Endpoint == "" {
if APIKey != "" {
// User provided their own API key, use the OpenAI-compatible endpoint
Endpoint = a.cfg.OpenAIBaseURL // standard openai base url
} else {
// suffix needed for cloudflare gateway
Endpoint = a.cfg.InferenceBaseURL + "/openrouter"
}
}
}

if APIKey == "" {
APIKey = a.cfg.InferenceAPIKey
if APIKey == "" {
APIKey = a.cfg.InferenceAPIKey
}
}

opts := []option.RequestOption{
Expand Down
8 changes: 4 additions & 4 deletions internal/services/toolkit/client/completion_v2.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ import (
// 1. The full chat history sent to the language model (including any tool call results).
// 2. The incremental chat history visible to the user (including tool call results and assistant responses).
// 3. An error, if any occurred during the process.
func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) {
openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider)
func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig, customModel *models.CustomModel) (OpenAIChatHistory, AppChatHistory, error) {
openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider, customModel)
if err != nil {
return nil, nil, err
}
Expand Down Expand Up @@ -54,7 +54,7 @@ func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, mes
// - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop.
// - If no tool calls are needed, it appends the assistant's response and exits the loop.
// - Finally, it returns the updated chat histories and any error encountered.
func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) {
func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig, customModel *models.CustomModel) (OpenAIChatHistory, AppChatHistory, error) {
openaiChatHistory := messages
inappChatHistory := AppChatHistory{}

Expand All @@ -66,7 +66,7 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream
}()

oaiClient := a.GetOpenAIClient(llmProvider)
params := getDefaultParamsV2(modelSlug, a.toolCallHandler.Registry)
params := getDefaultParamsV2(modelSlug, a.toolCallHandler.Registry, customModel)

for {
params.Messages = openaiChatHistory
Expand Down
2 changes: 1 addition & 1 deletion internal/services/toolkit/client/get_citation_keys.go
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,7 @@ func (a *AIClientV2) GetCitationKeys(ctx context.Context, sentence string, userI
_, resp, err := a.ChatCompletionV2(ctx, "gpt-5.2", OpenAIChatHistory{
openai.SystemMessage("You are a helpful assistant that suggests relevant citation keys."),
openai.UserMessage(message),
}, llmProvider)
}, llmProvider, nil)

if err != nil {
return nil, err
Expand Down
12 changes: 9 additions & 3 deletions internal/services/toolkit/client/get_conversation_title_v2.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ import (
"github.com/samber/lo"
)

func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistory []*chatv2.Message, llmProvider *models.LLMProviderConfig) (string, error) {
func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistory []*chatv2.Message, llmProvider *models.LLMProviderConfig, modelSlug string, customModel *models.CustomModel) (string, error) {
messages := lo.Map(inappChatHistory, func(message *chatv2.Message, _ int) string {
if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_Assistant); ok {
return fmt.Sprintf("Assistant: %s", message.Payload.GetAssistant().GetContent())
Expand All @@ -29,10 +29,16 @@ func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistor
message := strings.Join(messages, "\n")
message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. Give me the title only, no other text including any other words.", message)

_, resp, err := a.ChatCompletionV2(ctx, "gpt-5-nano", OpenAIChatHistory{
// Default model if user is not using their own
modelToUse := "gpt-5-nano"
if llmProvider.IsCustomModel {
modelToUse = modelSlug
}

_, resp, err := a.ChatCompletionV2(ctx, modelToUse, OpenAIChatHistory{
openai.SystemMessage("You are a helpful assistant that generates a title for a conversation."),
openai.UserMessage(message),
}, llmProvider)
}, llmProvider, customModel)
if err != nil {
return "", err
}
Expand Down
Loading
Loading