High severity · 7.1 · GHSA Advisory · Published May 15, 2026
CVE-2026-45349
Description
Open WebUI is a self-hosted artificial intelligence platform designed to operate entirely offline. Prior to version 0.9.0, an authenticated user only needed to call the /api/chat/completions API endpoint with their own API key (generated in Open WebUI) and the chat ID of another user in order to continue that other user's conversation. This vulnerability is fixed in 0.9.0.
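To illustrate the issue, the sketch below shows the general shape of such a request in Python. The endpoint path and the chat_id/messages fields come from the advisory and the patch; the instance URL, API key, model name, chat ID, and the Authorization header format are hypothetical stand-ins.

```python
import requests

# Hypothetical values for illustration only.
BASE_URL = "https://owui.example.com"   # assumed Open WebUI instance
API_KEY = "sk-attacker-own-key"         # the caller's own OWUI-generated API key
VICTIM_CHAT_ID = "0f8b2c1e-victim"      # chat ID belonging to another user

# Prior to 0.9.0, supplying another user's chat_id here was accepted,
# so the completion was appended to that user's conversation.
resp = requests.post(
    f"{BASE_URL}/api/chat/completions",
    headers={"Authorization": f"Bearer {API_KEY}"},  # auth header format assumed
    json={
        "model": "llama3",  # assumed model id
        "chat_id": VICTIM_CHAT_ID,
        "messages": [{"role": "user", "content": "continue"}],
    },
    timeout=60,
)
# Per the 0.9.0 patch, chats the caller does not own now return HTTP 404.
print(resp.status_code)
```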
Affected products
- Range: <= 0.8.12
Patches
17 files changed · +462 −190
backend/open_webui/main.py+222 −56 modified@@ -116,7 +116,7 @@ from open_webui.models.functions import Functions from open_webui.models.models import Models from open_webui.models.users import UserModel, Users -from open_webui.models.chats import Chats +from open_webui.models.chats import Chats, ChatForm from open_webui.config import ( # Ollama @@ -1693,13 +1693,30 @@ async def chat_completion( if model_info_params.get('reasoning_tags') is not None: reasoning_tags = model_info_params.get('reasoning_tags') + # parent_id signals intent: + # null → new chat (root message, no parent) + # value → follow-up (user message's parentId = prev assistant) + # absent → legacy caller, no chat management + is_new_chat = 'parent_id' in form_data and form_data['parent_id'] is None and not form_data.get('chat_id') + parent_id = form_data.pop('parent_id', None) + form_data.pop('new_chat', None) # Legacy field + + # Multi-model: {model_id: assistant_message_id} + # Single-model fallback: built from 'model' + 'id' + message_ids = form_data.pop('message_ids', None) + if not message_ids: + message_ids = {model_id: form_data.pop('id', None)} + else: + form_data.pop('id', None) + + user_message = form_data.pop('user_message', None) or form_data.pop('parent_message', None) metadata = { 'user_id': user.id, 'chat_id': form_data.pop('chat_id', None), - 'message_id': form_data.pop('id', None), - 'parent_message': form_data.pop('parent_message', None), - 'parent_message_id': form_data.pop('parent_id', None), + 'user_message': user_message, + 'user_message_id': user_message.get('id') if user_message else None, 'session_id': form_data.pop('session_id', None), + 'folder_id': form_data.pop('folder_id', None), 'filter_ids': form_data.pop('filter_ids', []), 'tool_ids': form_data.get('tool_ids', None), 'tool_servers': form_data.pop('tool_servers', None), @@ -1722,36 +1739,160 @@ async def chat_completion( }, } + if is_new_chat: + metadata['chat_id'] = str(uuid4()) + if metadata.get('chat_id') and user: - if not metadata['chat_id'].startswith('local:'): # temporary chats are not stored - # Verify chat ownership — lightweight EXISTS check avoids - # deserializing the full chat JSON blob just to confirm the row exists - if ( - not await Chats.is_chat_owner(metadata['chat_id'], user.id) and user.role != 'admin' - ): # admins can access any chat - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail=ERROR_MESSAGES.DEFAULT(), + chat_id = metadata['chat_id'] + if not chat_id.startswith('local:'): # temporary chats are not stored + if is_new_chat: + # Build the full history upfront with ALL assistant placeholders + user_message = metadata.get('user_message') or {} + user_message_id = user_message.get('id') if user_message else None + + history_messages = {} + all_assistant_ids = [assistant_id for assistant_id in message_ids.values() if assistant_id] + + if user_message_id and user_message: + user_message['childrenIds'] = all_assistant_ids + history_messages[user_message_id] = user_message + + for target_model_id, assistant_message_id in message_ids.items(): + if assistant_message_id: + history_messages[assistant_message_id] = { + 'id': assistant_message_id, + 'parentId': user_message_id, + 'childrenIds': [], + 'role': 'assistant', + 'content': '', + 'done': False, + 'model': target_model_id, + 'timestamp': int(time.time()), + } + + await Chats.insert_new_chat( + chat_id, + user.id, + ChatForm( + chat={ + 'id': chat_id, + 'title': 'New Chat', + 'models': list(message_ids.keys()), + 'history': { + 'currentId': 
all_assistant_ids[0] if all_assistant_ids else user_message_id, + 'messages': history_messages, + }, + 'messages': [ + {'role': 'user', 'content': user_message.get('content', '')}, + ] if user_message_id else [], + 'tags': [], + 'timestamp': int(time.time() * 1000), + }, + folder_id=metadata.get('folder_id'), + ), ) - # Insert chat files from parent message if any - parent_message = metadata.get('parent_message') or {} - parent_message_files = parent_message.get('files', []) - if parent_message_files: - try: - await Chats.insert_chat_files( - metadata['chat_id'], - parent_message.get('id'), - [ - file_item.get('id') - for file_item in parent_message_files - if file_item.get('type') == 'file' - ], - user.id, + # Insert chat files from user message if any + user_message_files = user_message.get('files', []) + if user_message_files: + try: + await Chats.insert_chat_files( + chat_id, + user_message_id, + [ + file_item.get('id') + for file_item in user_message_files + if file_item.get('type') == 'file' + ], + user.id, + ) + except Exception as e: + log.debug(f'Error inserting chat files: {e}') + pass + else: + # Existing chat — verify ownership + if ( + not await Chats.is_chat_owner(chat_id, user.id) and user.role != 'admin' + ): + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=ERROR_MESSAGES.DEFAULT(), + ) + + # Save user message to DB + user_message = metadata.get('user_message') or {} + if user_message and user_message.get('id'): + await Chats.upsert_message_to_chat_by_id_and_message_id( + chat_id, + user_message['id'], + user_message, ) - except Exception as e: - log.debug(f'Error inserting chat files: {e}') - pass + + # Link grandparent → user message (childrenIds) + grandparent_id = user_message.get('parentId') + if grandparent_id: + grandparent = await Chats.get_message_by_id_and_message_id(chat_id, grandparent_id) + if grandparent: + child_ids = grandparent.get('childrenIds', []) + if user_message['id'] not in child_ids: + child_ids.append(user_message['id']) + await Chats.upsert_message_to_chat_by_id_and_message_id( + chat_id, grandparent_id, {'childrenIds': child_ids} + ) + + # Insert chat files from user message if any + user_message_files = user_message.get('files', []) + if user_message_files: + try: + await Chats.insert_chat_files( + chat_id, + user_message.get('id'), + [ + file_item.get('id') + for file_item in user_message_files + if file_item.get('type') == 'file' + ], + user.id, + ) + except Exception as e: + log.debug(f'Error inserting chat files: {e}') + pass + + # Save ALL assistant placeholders + user_message_id = metadata.get('user_message_id') + all_assistant_ids = [assistant_id for assistant_id in message_ids.values() if assistant_id] + + # Link user message → all assistant messages (childrenIds) + if user_message_id and all_assistant_ids: + existing_user_message = await Chats.get_message_by_id_and_message_id( + chat_id, user_message_id + ) + if existing_user_message: + child_ids = existing_user_message.get('childrenIds', []) + for assistant_id in all_assistant_ids: + if assistant_id not in child_ids: + child_ids.append(assistant_id) + await Chats.upsert_message_to_chat_by_id_and_message_id( + chat_id, user_message_id, {'childrenIds': child_ids}, + ) + + # Save each assistant placeholder + for target_model_id, assistant_message_id in message_ids.items(): + if assistant_message_id: + await Chats.upsert_message_to_chat_by_id_and_message_id( + chat_id, + assistant_message_id, + { + 'id': assistant_message_id, + 'parentId': user_message_id, + 
'childrenIds': [], + 'role': 'assistant', + 'content': '', + 'done': False, + 'model': target_model_id, + 'timestamp': int(time.time()), + }, + ) request.state.metadata = metadata form_data['metadata'] = metadata @@ -1783,19 +1924,6 @@ async def process_chat(request, form_data, user, metadata, model): except Exception: detail = f'Provider returned HTTP {response.status_code}' raise Exception(detail) - if metadata.get('chat_id') and metadata.get('message_id'): - try: - if not metadata['chat_id'].startswith('local:'): - await Chats.upsert_message_to_chat_by_id_and_message_id( - metadata['chat_id'], - metadata['message_id'], - { - 'parentId': metadata.get('parent_message_id', None), - 'model': model_id, - }, - ) - except Exception: - pass ctx = await build_chat_response_context(request, form_data, user, model, metadata, tasks, events) @@ -1824,7 +1952,7 @@ async def process_chat(request, form_data, user, metadata, model): metadata['chat_id'], metadata['message_id'], { - 'parentId': metadata.get('parent_message_id', None), + 'parentId': metadata.get('user_message_id', None), 'error': {'content': str(e)}, }, ) @@ -1875,19 +2003,55 @@ async def _cleanup_mcp(): except Exception as e: log.debug(f'Error emitting chat:active: {e}') - if metadata.get('session_id') and metadata.get('chat_id') and metadata.get('message_id'): - # Asynchronous Chat Processing - task_id, _ = await create_task( - request.app.state.redis, - process_chat(request, form_data, user, metadata, model), - id=metadata['chat_id'], - ) - # Emit chat:active=true when task starts - event_emitter = await get_event_emitter(metadata, update_db=False) - if event_emitter: - await event_emitter({'type': 'chat:active', 'data': {'active': True}}) - return {'status': True, 'task_id': task_id} + # Fan out: one task per model + if metadata.get('session_id') and metadata.get('chat_id'): + task_ids = [] + chat_id = metadata['chat_id'] + + for target_model_id, assistant_message_id in message_ids.items(): + if not assistant_message_id: + continue + + # Per-model metadata: own message_id + model + per_model_metadata = { + **metadata, + 'message_id': assistant_message_id, + } + + # Per-model form_data: own model + model_form_data = { + **form_data, + 'model': target_model_id, + 'metadata': per_model_metadata, + } + + # Resolve the model object for this specific model + resolved_model = request.app.state.MODELS.get(target_model_id, model) + + task_id, _ = await create_task( + request.app.state.redis, + process_chat(request, model_form_data, user, per_model_metadata, resolved_model), + id=chat_id, + ) + task_ids.append(task_id) + + # Emit chat:active=true + if task_ids: + event_emitter = await get_event_emitter( + {**metadata, 'message_id': list(message_ids.values())[0]}, + update_db=False, + ) + if event_emitter: + await event_emitter({'type': 'chat:active', 'data': {'active': True}}) + + return { + 'status': True, + 'task_ids': task_ids, + 'chat_id': chat_id, + } else: + # Legacy/direct: single model, synchronous + metadata['message_id'] = list(message_ids.values())[0] return await process_chat(request, form_data, user, metadata, model) @@ -1962,6 +2126,8 @@ async def generate_messages( @app.post('/api/chat/completed') async def chat_completed(request: Request, form_data: dict, user=Depends(get_verified_user)): + """Deprecated: outlet filters now run inline during chat completion. + Kept for backward compatibility with external integrations.""" try: model_item = form_data.pop('model_item', {})
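For orientation, the sketch below summarizes the request/response contract this main.py change introduces for managed chats. The field names are taken from the diff above; the concrete IDs, model names, and session value are made-up examples, and optional fields are omitted.

```python
# Sketch of the new /api/chat/completions payload shape (new-chat case).
new_chat_request = {
    "model": "llama3",  # assumed model id
    "messages": [{"role": "user", "content": "Hello"}],
    "parent_id": None,            # null => root message, server creates the chat
    "user_message": {             # replaces the old parent_message field
        "id": "user-msg-1",       # hypothetical message id
        "parentId": None,
        "role": "user",
        "content": "Hello",
    },
    "id": "asst-msg-1",           # single-model assistant placeholder id
    # Multi-model form, sent instead of "id":
    # "message_ids": {"llama3": "asst-msg-1", "gpt-4o": "asst-msg-2"},
    "session_id": "socket-session-id",  # assumed
}

# For managed chats the endpoint now responds with
# {"status": True, "task_ids": [...], "chat_id": "<server-generated uuid>"}
# instead of the earlier single "task_id".
```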
backend/open_webui/models/chats.py · +1 −2 · modified

@@ -293,10 +293,9 @@ def _sanitize_chat_row(self, chat_item):
         return changed

     async def insert_new_chat(
-        self, user_id: str, form_data: ChatForm, db: Optional[AsyncSession] = None
+        self, id: str, user_id: str, form_data: ChatForm, db: Optional[AsyncSession] = None
     ) -> Optional[ChatModel]:
         async with get_async_db_context(db) as db:
-            id = str(uuid.uuid4())
             chat = ChatModel(
                 **{
                     'id': id,
backend/open_webui/routers/chats.py · +2 −1 · modified

@@ -1,6 +1,7 @@
 import json
 import logging
 from typing import Optional
+from uuid import uuid4
 from sqlalchemy.ext.asyncio import AsyncSession
 import asyncio
 from fastapi.responses import StreamingResponse
@@ -557,7 +558,7 @@ async def create_new_chat(
     db: AsyncSession = Depends(get_async_session),
 ):
     try:
-        chat = await Chats.insert_new_chat(user.id, form_data, db=db)
+        chat = await Chats.insert_new_chat(str(uuid4()), user.id, form_data, db=db)
         return ChatResponse(**chat.model_dump())
     except Exception as e:
         log.exception(e)
backend/open_webui/utils/automations.py · +9 −2 · modified

@@ -307,8 +307,9 @@ async def execute_automation(app, automation: AutomationModel) -> None:
     user_msg_id = str(uuid4())
     assistant_msg_id = str(uuid4())

-    # Create the chat with user message (same structure as frontend)
+    chat_id = str(uuid4())
     chat = await Chats.insert_new_chat(
+        chat_id,
         automation.user_id,
         ChatForm(
             chat={
@@ -378,7 +379,13 @@ async def execute_automation(app, automation: AutomationModel) -> None:
         'stream': True,
         'chat_id': chat.id,
         'id': assistant_msg_id,
-        'parent_id': user_msg_id,
+        'parent_id': None,  # Root message (chat already created above)
+        'user_message': {
+            'id': user_msg_id,
+            'parentId': None,
+            'role': 'user',
+            'content': prompt,
+        },
         'session_id': f'automation:{automation.id}',
         'background_tasks': {},
     }
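Both this automation path and the main.py change above persist the same per-chat message tree. A small sketch of that history structure follows; the field names are from the diffs, while the IDs and model name are made up.

```python
# Sketch of the chat "history" structure built server-side for a new chat.
history = {
    "currentId": "asst-1",
    "messages": {
        "user-1": {
            "id": "user-1",
            "parentId": None,           # root of the conversation tree
            "childrenIds": ["asst-1"],  # one child per responding model
            "role": "user",
            "content": "Hello",
        },
        "asst-1": {
            "id": "asst-1",
            "parentId": "user-1",
            "childrenIds": [],
            "role": "assistant",
            "content": "",      # placeholder, filled in as the generation task streams
            "done": False,
            "model": "llama3",  # assumed model id
        },
    },
}
```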
backend/open_webui/utils/middleware.py+109 −3 modified@@ -2153,10 +2153,10 @@ async def process_chat_payload(request, form_data, user, metadata, model): # Load messages from DB when available — DB preserves structured 'output' items # which the frontend strips, causing tool calls to be merged into content. chat_id = metadata.get('chat_id') - parent_message_id = metadata.get('parent_message_id') + user_message_id = metadata.get('user_message_id') - if chat_id and parent_message_id and not chat_id.startswith('local:'): - db_messages = await load_messages_from_db(chat_id, parent_message_id) + if chat_id and user_message_id and not chat_id.startswith('local:'): + db_messages = await load_messages_from_db(chat_id, user_message_id) if db_messages: system_message = get_system_message(form_data.get('messages', [])) form_data['messages'] = [system_message, *db_messages] if system_message else db_messages @@ -3061,6 +3061,110 @@ async def background_tasks_handler(ctx): pass +async def outlet_filter_handler(ctx): + """Run outlet filters inline after chat completion. + + Replaces the separate POST /api/chat/completed round-trip. + Persists outlet-modified content to DB and emits a chat:outlet event + so the frontend can sync its in-memory state. + """ + request = ctx['request'] + user = ctx['user'] + model = ctx['model'] + metadata = ctx['metadata'] + event_emitter = ctx.get('event_emitter') + event_caller = ctx.get('event_caller') + + chat_id = metadata.get('chat_id', '') + message_id = metadata.get('message_id') + + if not chat_id or chat_id.startswith('local:') or not message_id: + return + + try: + messages_map = await Chats.get_messages_map_by_chat_id(chat_id) + if not messages_map: + return + + message_list = get_message_list(messages_map, message_id) + if not message_list: + return + + model_id = model.get('id') if isinstance(model, dict) else model + + outlet_data = { + 'model': model_id, + 'messages': [ + { + 'id': m.get('id'), + 'role': m.get('role'), + 'content': m.get('content', ''), + 'info': m.get('info'), + 'timestamp': m.get('timestamp'), + **(({'usage': m['usage']} if m.get('usage') else {})), + **(({'sources': m['sources']} if m.get('sources') else {})), + } + for m in message_list + ], + 'filter_ids': metadata.get('filter_ids', []), + 'chat_id': chat_id, + 'session_id': metadata.get('session_id'), + 'id': message_id, + } + + # Pipeline outlet filters + models = request.app.state.MODELS + try: + outlet_data = await process_pipeline_outlet_filter(request, outlet_data, user, models) + except Exception as e: + log.debug(f'Pipeline outlet filter error: {e}') + + # Function outlet filters + extra_params = { + '__event_emitter__': event_emitter, + '__event_call__': event_caller, + '__user__': user.model_dump() if isinstance(user, UserModel) else {}, + '__metadata__': metadata, + '__request__': request, + '__model__': model, + } + + filter_ids = await get_sorted_filter_ids(request, model, metadata.get('filter_ids', [])) + filter_functions = await Functions.get_functions_by_ids(filter_ids) + + outlet_result, _ = await process_filter_functions( + request=request, + filter_functions=filter_functions, + filter_type='outlet', + form_data=outlet_data, + extra_params=extra_params, + ) + + # Persist outlet-modified content and notify frontend + if outlet_result and outlet_result.get('messages'): + for msg in outlet_result['messages']: + msg_id = msg.get('id') + if msg_id and msg_id in messages_map: + original = messages_map[msg_id] + if original.get('content') != msg.get('content'): + await 
Chats.upsert_message_to_chat_by_id_and_message_id( + chat_id, + msg_id, + { + 'content': msg['content'], + 'originalContent': original.get('content'), + }, + ) + + if event_emitter: + await event_emitter({ + 'type': 'chat:outlet', + 'data': {'messages': outlet_result['messages']}, + }) + except Exception as e: + log.debug(f'Error running outlet filters: {e}') + + async def non_streaming_chat_response_handler(response, ctx): request = ctx['request'] @@ -3182,6 +3286,7 @@ async def non_streaming_chat_response_handler(response, ctx): ) await background_tasks_handler(ctx) + await outlet_filter_handler(ctx) response = build_response_object(response, merge_events_into_response(response_data, events)) except Exception as e: @@ -4693,6 +4798,7 @@ async def restricted_import(name, globals=None, locals=None, fromlist=(), level= ) await background_tasks_handler(ctx) + await outlet_filter_handler(ctx) except asyncio.CancelledError: log.warning('Task was cancelled!') try:
src/lib/components/chat/Chat.svelte+107 −125 modified@@ -81,7 +81,6 @@ import { processWeb, processWebSearch, processYoutubeVideo } from '$lib/apis/retrieval'; import { getAndUpdateUserLocation, getUserSettings } from '$lib/apis/users'; import { - chatCompleted, generateQueries, chatAction, generateMoACompletion, @@ -491,6 +490,22 @@ if (autoScroll) { scrollToBottom('smooth'); } + } else if (type === 'chat:outlet') { + // Outlet filter ran on backend — sync in-memory state + const outletMessages = data.messages ?? []; + for (const msg of outletMessages) { + if (msg?.id && history.messages[msg.id]) { + const existing = history.messages[msg.id]; + if (existing.content !== msg.content) { + history.messages[msg.id] = { + ...existing, + originalContent: existing.content, + ...msg + }; + } + } + } + history = history; } else if (type === 'chat:message:favorite') { // Update message favorite status message.favorite = data.favorite; @@ -1361,6 +1376,17 @@ taskIds = taskRes.task_ids; } + // If no active tasks and current message is incomplete, generation was interrupted + const currentMessage = history.currentId ? history.messages[history.currentId] : null; + if ( + currentMessage && + currentMessage.role === 'assistant' && + !currentMessage.done && + (!taskIds || taskIds.length === 0) + ) { + currentMessage.done = true; + } + await tick(); return true; @@ -1416,71 +1442,12 @@ }; const chatCompletedHandler = async (_chatId, modelId, responseMessageId, messages) => { - if (!responseMessageId) { - console.error('chatCompleted: missing message id', { - chatId: _chatId, - modelId, - messageCount: messages?.length ?? 0 - }); - return; - } - - const res = await chatCompleted(localStorage.token, { - model: modelId, - messages: messages.map((m) => ({ - id: m.id, - role: m.role, - content: m.content, - info: m.info ? m.info : undefined, - timestamp: m.timestamp, - ...(m.usage ? { usage: m.usage } : {}), - ...(m.sources ? { sources: m.sources } : {}) - })), - filter_ids: selectedFilterIds.length > 0 ? selectedFilterIds : undefined, - model_item: $models.find((m) => m.id === modelId), - chat_id: _chatId, - session_id: $socket?.id, - id: responseMessageId - }).catch((error) => { - toast.error(`${error}`); - messages.at(-1).error = { content: error }; - - return null; - }); - - if (res !== null && res.messages) { - // Update chat history with the new messages - for (const message of res.messages) { - if (message?.id) { - // Add null check for message and message.id - history.messages[message.id] = { - ...history.messages[message.id], - ...(history.messages[message.id].content !== message.content - ? { originalContent: history.messages[message.id].content } - : {}), - ...message - }; - } - } - } - - await tick(); - - if ($chatId == _chatId) { - if (!$temporaryChatEnabled) { - chat = await updateChatById(localStorage.token, _chatId, { - models: selectedModels, - messages: messages, - history: history, - params: params, - files: chatFiles - }); - - currentChatPage.set(1); - await chats.set(await getChatList(localStorage.token, $currentChatPage)); - } + // Backend handles outlet filters and persistence inline. + // Just refresh the sidebar chat list. 
+ if ($chatId == _chatId && !$temporaryChatEnabled) { + currentChatPage.set(1); + await chats.set(await getChatList(localStorage.token, $currentChatPage)); } - taskIds = null; }; @@ -1894,7 +1861,7 @@ saveSessionSelectedModels(); - await sendMessage(history, userMessageId, { newChat: true }); + await sendMessage(history, userMessageId); }; const submitHandler = async (userPrompt, { _raw = false } = {}) => { @@ -1994,13 +1961,11 @@ { messages = null, modelId = null, - modelIdx = null, - newChat = false + modelIdx = null }: { messages?: any[] | null; modelId?: string | null; modelIdx?: number | null; - newChat?: boolean; } = {} ) => { if (autoScroll) { @@ -2019,6 +1984,8 @@ : selectedModels; // Create response messages for each selected model + // Build message_ids map: {model_id: assistant_message_id} + const messageIdsMap: Record<string, string> = {}; for (const [_modelIdx, modelId] of selectedModelIds.entries()) { const model = $models.filter((m) => m.id === modelId).at(0); @@ -2043,76 +2010,78 @@ // Append messageId to childrenIds of parent message if (parentId !== null && history.messages[parentId]) { - // Add null check before accessing childrenIds history.messages[parentId].childrenIds = [ ...history.messages[parentId].childrenIds, responseMessageId ]; } responseMessageIds[`${modelId}-${modelIdx ? modelIdx : _modelIdx}`] = responseMessageId; + messageIdsMap[modelId] = responseMessageId; } } history = history; - // Create new chat if newChat is true and first user message - if (newChat && _history.messages[_history.currentId].parentId === null) { - _chatId = await initChatHandler(_history); + // New chat — backend generates the chat_id on first request + if (!_chatId) { + if ($temporaryChatEnabled) { + _chatId = `local:${$socket?.id}`; + await chatId.set(_chatId); + } + await tick(); } await tick(); + // Re-clone history so sendMessageSocket gets the response messages we just added _history = structuredClone(history); - // Save chat after all messages have been created - await saveChatHandler(_chatId, _history); - - await Promise.all( - selectedModelIds.map(async (modelId, _modelIdx) => { - console.log('modelId', modelId); - const model = $models.filter((m) => m.id === modelId).at(0); - - if (model) { - // If there are image files, check if model is vision capable - // Skip this check if image generation is enabled, as images may be for editing or are generated outputs in the history - const hasImages = createMessagesList(_history, parentId).some((message) => - message.files?.some( - (file) => file.type === 'image' || (file?.content_type ?? '').startsWith('image/') - ) - ); - if ( - hasImages && - !(model.info?.meta?.capabilities?.vision ?? true) && - !imageGenerationEnabled - ) { - toast.error( - $i18n.t('Model {{modelName}} is not vision capable', { - modelName: model.name ?? model.id - }) - ); - } + // Vision capability check + for (const mid of selectedModelIds) { + const model = $models.filter((m) => m.id === mid).at(0); + if (model) { + const hasImages = createMessagesList(_history, parentId).some((message) => + message.files?.some( + (file) => file.type === 'image' || (file?.content_type ?? '').startsWith('image/') + ) + ); - let responseMessageId = - responseMessageIds[`${modelId}-${modelIdx ? modelIdx : _modelIdx}`]; - const chatEventEmitter = await getChatEventEmitter(model.id, _chatId); - - scrollToBottom(); - await sendMessageSocket( - model, - messages && messages.length > 0 - ? 
messages - : createMessagesList(_history, responseMessageId), - _history, - responseMessageId, - _chatId + if ( + hasImages && + !(model.info?.meta?.capabilities?.vision ?? true) && + !imageGenerationEnabled + ) { + toast.error( + $i18n.t('Model {{modelName}} is not vision capable', { + modelName: model.name ?? model.id + }) ); - - if (chatEventEmitter) clearInterval(chatEventEmitter); - } else { - toast.error($i18n.t(`Model {{modelId}} not found`, { modelId })); } - }) - ); + } + } + + // Single request — backend fans out to all models + const primaryModelId = selectedModelIds[0]; + const primaryModel = $models.filter((m) => m.id === primaryModelId).at(0); + const primaryResponseMessageId = messageIdsMap[primaryModelId]; + + if (primaryModel && primaryResponseMessageId) { + const chatEventEmitter = await getChatEventEmitter(primaryModel.id, _chatId); + + scrollToBottom(); + await sendMessageSocket( + primaryModel, + messages && messages.length > 0 + ? messages + : createMessagesList(_history, primaryResponseMessageId), + _history, + primaryResponseMessageId, + _chatId, + selectedModelIds.length > 1 ? messageIdsMap : undefined + ); + + if (chatEventEmitter) clearInterval(chatEventEmitter); + } }; const getFeatures = () => { @@ -2167,7 +2136,7 @@ .map((token) => decodeURIComponent(JSON.parse(`"${token.replace(/"/g, '\\"')}"`))); }; - const sendMessageSocket = async (model, _messages, _history, responseMessageId, _chatId) => { + const sendMessageSocket = async (model, _messages, _history, responseMessageId, _chatId, messageIdsMap?: Record<string, string>) => { const responseMessage = _history.messages[responseMessageId]; const userMessage = _history.messages[responseMessage.parentId]; @@ -2357,12 +2326,13 @@ model_item: $models.find((m) => m.id === model.id), session_id: $socket?.id, - chat_id: $chatId, + chat_id: _chatId || undefined, folder_id: $selectedFolder?.id ?? undefined, id: responseMessageId, - parent_id: userMessage?.id ?? null, - parent_message: userMessage, + ...(messageIdsMap ? { message_ids: messageIdsMap } : {}), + parent_id: userMessage?.parentId ?? null, + user_message: userMessage, background_tasks: { ...(!$temporaryChatEnabled && @@ -2419,10 +2389,22 @@ if (res.error) { await handleOpenAIError(res.error, responseMessage); } else { + // Backend returns task_ids (multi-model) or task_id (single model) + const newTaskIds = res.task_ids ?? (res.task_id ? [res.task_id] : []); if (taskIds) { - taskIds.push(res.task_id); + taskIds.push(...newTaskIds); } else { - taskIds = [res.task_id]; + taskIds = newTaskIds; + } + + // Backend returns chat_id for new chats — set store + URL + if (res.chat_id && $chatId !== res.chat_id) { + await chatId.set(res.chat_id); + if (!$temporaryChatEnabled) { + window.history.replaceState(history.state, '', `/c/${res.chat_id}`); + currentChatPage.set(1); + await chats.set(await getChatList(localStorage.token, $currentChatPage)); + } } } }
src/lib/components/layout/Sidebar/ChatItem.svelte · +12 −1 · modified

@@ -88,10 +88,21 @@
 	let mouseOver = false;

+	// Local state: tracks the last updatedAt seen while the user was viewing
+	// this chat. Survives prop refreshes from sidebar data re-fetches that
+	// would overwrite the `lastReadAt` prop with a stale server value.
+	let viewedAt: number | null = null;
+
+	$: if (id === $chatId) {
+		viewedAt = updatedAt ?? Date.now() / 1000;
+	}
+
+	$: effectiveReadAt = Math.max(lastReadAt ?? 0, viewedAt ?? 0) || null;
+
 	$: unread =
 		id !== $chatId &&
 		!$activeChatIds.has(id) &&
-		(lastReadAt === null || (updatedAt !== null && updatedAt > lastReadAt));
+		(effectiveReadAt === null || (updatedAt !== null && updatedAt > effectiveReadAt));

 	const loadChat = async () => {
 		if (!chat) {
Vulnerability mechanics
The endpoint at issue is POST /api/chat/completions, which accepts a chat_id in the request body and persists the resulting messages to that chat. According to the advisory, prior to 0.9.0 an authenticated user could supply another user's chat ID together with their own API key and have the completion appended to that user's conversation, i.e. an authorization gap keyed on the chat ID. In the patched flow shown above, IDs for new chats are generated server-side, and requests that reference an existing chat are rejected with HTTP 404 unless Chats.is_chat_owner confirms the caller owns it (admins excepted).
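A minimal sketch of that gate, simplified from the main.py hunk in the Patches section (the function boundary, the chats parameter, and the bare 404 response are illustration-only simplifications, not the exact upstream code):

```python
from uuid import uuid4
from fastapi import HTTPException, status

async def resolve_chat_id(form_data: dict, user, chats) -> str | None:
    """Simplified version of the chat_id handling added in 0.9.0."""
    is_new_chat = (
        "parent_id" in form_data
        and form_data["parent_id"] is None
        and not form_data.get("chat_id")
    )
    chat_id = form_data.get("chat_id")

    if is_new_chat:
        # New chats get a server-generated ID, so a client cannot point
        # the request at an arbitrary existing chat.
        return str(uuid4())

    if chat_id and not chat_id.startswith("local:"):  # temporary chats are not stored
        # Existing chat: reject unless the caller owns it (admins excepted).
        if not await chats.is_chat_owner(chat_id, user.id) and user.role != "admin":
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)

    return chat_id
```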
References
- github.com/advisories/GHSA-gfm2-xm6c-37qc (GHSA, Advisory)
- github.com/open-webui/open-webui/commit/cf4218e688def6f11d195aeda6665ae5b5376b67 (GHSA)
- github.com/open-webui/open-webui/releases/tag/v0.9.0 (GHSA)
- github.com/open-webui/open-webui/security/advisories/GHSA-gfm2-xm6c-37qc (NVD)
- nvd.nist.gov/vuln/detail/CVE-2026-45349 (GHSA)
News mentions
No linked articles in our index yet.