The ChatBackend class handles the communication with the Ollama server.
#include <backend.h>
Public Slots

void setModel(const QString &model)
    Sets the model name.
void fetchModelList()
    Fetches the list of available models from the Ollama server.
Thread *getThread(const int index)
    Returns the thread at the given index.
void deleteThread(const int index)
    Removes the thread at the given index.
void clearThreads()
    Removes all threads.
void sendMessage(const int index, const QString &prompt)
    Sends a message to the Ollama server.
void setSystemPrompt(const QString &prompt)
    Sets the system prompt.
void setOllamaServerUrl(const QString &url)
    Sets the Ollama server URL.
void retryLatestMessage(const int index)
    Retries the latest message.
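As an overview, a minimal end-to-end sketch in C++ follows. It only calls members documented on this page; the server URL and model name are illustrative placeholders, and driving the backend from a plain QCoreApplication is an assumption.

#include <QCoreApplication>
#include <backend.h>

int main(int argc, char *argv[]) {
  QCoreApplication app(argc, argv);

  // The constructor takes an optional QObject parent (defaults to nullptr).
  llm_chat::ChatBackend backend;

  // Placeholder configuration; 11434 is Ollama's conventional default port,
  // and "llama3" stands in for any model name the server knows.
  backend.setOllamaServerUrl(QStringLiteral("http://localhost:11434"));
  backend.setModel(QStringLiteral("llama3"));
  backend.setSystemPrompt(QStringLiteral("You are a helpful assistant."));

  // Ask the server which models it offers; the result is announced
  // through the modelListFetched() signal documented below.
  backend.fetchModelList();

  return app.exec();
}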
◆ ChatBackend()
explicit llm_chat::ChatBackend::ChatBackend(QObject *parent = nullptr)
Constructs a ChatBackend with the given parent object.
◆ clearThreads
void llm_chat::ChatBackend::clearThreads() [slot]
Removes all threads.
◆ deleteThread
void llm_chat::ChatBackend::deleteThread(const int index) [slot]
Removes the thread at the given index.
Parameters
    index | The index of the thread in the proxy model.
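A small, hypothetical cleanup helper tying the two removal slots together; `backend` is assumed to be a live ChatBackend and proxy index 0 an existing thread.

#include <backend.h>

// Hypothetical cleanup: remove the first thread in the proxy order,
// or wipe the whole thread list at once.
void pruneThreads(llm_chat::ChatBackend &backend, bool everything) {
  if (everything)
    backend.clearThreads();
  else
    backend.deleteThread(0);
}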
◆ fetchModelList
void llm_chat::ChatBackend::fetchModelList() [slot]
Fetches the list of available models from the Ollama server.
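Since this page documents a modelListFetched() signal, the fetch is presumably asynchronous. A sketch of the connect-then-fetch pattern, under that assumption:

#include <QDebug>
#include <QObject>
#include <backend.h>

// Hypothetical: log the models once the asynchronous fetch completes.
// In real code the connection would be made once, not per call.
void refreshModels(llm_chat::ChatBackend &backend) {
  QObject::connect(&backend, &llm_chat::ChatBackend::modelListFetched,
                   [&backend]() {
                     const QStringList models = backend.modelList();
                     for (const QString &name : models)
                       qDebug() << "available model:" << name;
                   });
  backend.fetchModelList();
}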
◆ getThread
Thread *llm_chat::ChatBackend::getThread(const int index) [slot]
Returns the thread at the given index.
Parameters
    index | The index of the thread in the proxy model.
Returns
    The thread at the given index.
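A defensive lookup sketch. Whether an out-of-range index returns nullptr is not stated on this page, so that behavior is an assumption here:

#include <QDebug>
#include <backend.h>

// Hypothetical lookup of the first thread in the sorted (proxy) order.
void inspectFirstThread(llm_chat::ChatBackend &backend) {
  auto *thread = backend.getThread(0);
  if (!thread) {  // assumption: invalid indices yield nullptr
    qDebug() << "no thread at proxy index 0";
    return;
  }
  qDebug() << "thread at proxy index 0 is available";
}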
◆ model()
QString llm_chat::ChatBackend::model() const [nodiscard]
Gets the name of the model.
Returns
    The name of the model.
◆ modelChanged
void llm_chat::ChatBackend::modelChanged() [signal]
Emitted when the model is changed.
◆ modelList()
QStringList llm_chat::ChatBackend::modelList() const [inline, nodiscard]
Gets the list of available models.
◆ modelListFetched
void llm_chat::ChatBackend::modelListFetched() [signal]
Emitted when the list of models is fetched.
◆ newThreadCreated
void llm_chat::ChatBackend::newThreadCreated() [signal]
Emitted when a new thread is created.
◆ ollamaServerUrl()
QString llm_chat::ChatBackend::ollamaServerUrl() const [nodiscard]
Gets the Ollama server URL.
Returns
    The Ollama server URL.
◆ ollamaServerUrlChanged
void llm_chat::ChatBackend::ollamaServerUrlChanged() [signal]
Emitted when the Ollama server URL is changed.
◆ retryLatestMessage
void llm_chat::ChatBackend::retryLatestMessage(const int index) [slot]
Retries the latest message in the thread at the given index.
Parameters
    index | The index of the thread in the proxy model.
◆ sendMessage
void llm_chat::ChatBackend::sendMessage(const int index, const QString &prompt) [slot]
Sends a message to the Ollama server.
Parameters
    index | The index of the thread in the proxy model.
    prompt | The message to send.
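A hypothetical exchange combining sendMessage() with retryLatestMessage(), which is documented above; proxy index 0 and the prompt text are placeholders.

#include <QString>
#include <backend.h>

// Hypothetical: post a prompt into the thread at proxy index 0, then
// re-issue the latest message (e.g. after a failed response).
void askAndRetry(llm_chat::ChatBackend &backend) {
  backend.sendMessage(0, QStringLiteral("Why is the sky blue?"));
  // ...later, if the response needs to be regenerated:
  backend.retryLatestMessage(0);
}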
◆ setModel
void llm_chat::ChatBackend::setModel(const QString &model) [slot]
Sets the name of the model to be used by the backend.
Parameters
    model | The name of the model.
◆ setOllamaServerUrl
void llm_chat::ChatBackend::setOllamaServerUrl(const QString &url) [slot]
Sets the Ollama server URL.
Parameters
    url | The Ollama server URL to set.
◆ setSystemPrompt
void llm_chat::ChatBackend::setSystemPrompt(const QString &prompt) [slot]
Sets the system prompt.
Parameters
    prompt | The system prompt to set.
◆ systemPrompt()
QString llm_chat::ChatBackend::systemPrompt() const [nodiscard]
Gets the system prompt.
Returns
    The system prompt.
◆ systemPromptChanged
void llm_chat::ChatBackend::systemPromptChanged() [signal]
Emitted when the system prompt is changed.
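The setter, getter, and change signal combine into the usual Qt property round trip; a sketch, assuming the setter emits systemPromptChanged() on an actual change:

#include <QDebug>
#include <QObject>
#include <backend.h>

// Hypothetical: observe prompt changes, then trigger one.
void watchSystemPrompt(llm_chat::ChatBackend &backend) {
  QObject::connect(&backend, &llm_chat::ChatBackend::systemPromptChanged,
                   [&backend]() {
                     qDebug() << "system prompt:" << backend.systemPrompt();
                   });
  backend.setSystemPrompt(QStringLiteral("Answer concisely."));
}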
◆ threadList()
ThreadList *llm_chat::ChatBackend::threadList() const [inline, nodiscard]
Returns the chat threads.
Returns
    All the chat threads.
◆ threadProxyList()
Returns the sorted chat threads.
Returns
    The sorted chat threads.
◆ model
QString llm_chat::ChatBackend::model [read, write]
◆ modelList
QStringList llm_chat::ChatBackend::modelList [read]
◆ ollamaServerUrl
QString llm_chat::ChatBackend::ollamaServerUrl [read]
◆ sortedThreads
◆ systemPrompt
QString llm_chat::ChatBackend::systemPrompt [read, write]
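The read/write markers above describe Q_PROPERTY declarations, which also make the values reachable through Qt's generic property system; a sketch using the property names exactly as listed:

#include <QDebug>
#include <QVariant>
#include <backend.h>

// Hypothetical generic access; the typed accessors documented above
// remain the preferred, type-safe route.
void dumpProperties(llm_chat::ChatBackend &backend) {
  qDebug() << "model:" << backend.property("model").toString();
  qDebug() << "server:" << backend.property("ollamaServerUrl").toString();
  backend.setProperty("systemPrompt", QStringLiteral("Be brief."));
}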