mirror of https://github.com/rzmk/learnhouse.git (synced 2025-12-19 04:19:25 +00:00)

feat: org wide ai features check

This commit is contained in:
parent de93d56945
commit 077c26ce15

24 changed files with 573 additions and 163 deletions
|
|
@ -15,8 +15,13 @@
|
|||
"esbenp.prettier-vscode",
|
||||
"ms-python.isort",
|
||||
"redhat.vscode-yaml"
|
||||
]
|
||||
],
|
||||
"settings": {
|
||||
"[python]": {
|
||||
"editor.defaultFormatter": "ms-python.python"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"shutdownAction": "stopCompose"
|
||||
}
|
||||
}
|
||||
|
|
@ -7,13 +7,19 @@ RUN pip install poetry
|
|||
#
|
||||
WORKDIR /usr/learnhouse/apps/api
|
||||
|
||||
#
|
||||
COPY ./requirements.txt /usr/learnhouse/requirements.txt
|
||||
# Copy poetry.lock* in case it doesn't exist in the repo
|
||||
COPY ./poetry.lock* /usr/learnhouse/
|
||||
|
||||
#
|
||||
RUN poetry config virtualenvs.create false \
|
||||
&& pip install --upgrade pip \
|
||||
&& pip install -r /usr/learnhouse/requirements.txt
|
||||
# Copy project requirement files here to ensure they will be cached.
|
||||
COPY pyproject.toml /usr/learnhouse/
|
||||
|
||||
# Install poetry
|
||||
RUN pip install --upgrade pip \
|
||||
&& pip install poetry \
|
||||
&& poetry config virtualenvs.create false
|
||||
|
||||
# Install project dependencies.
|
||||
RUN poetry install --no-interaction --no-ansi
|
||||
|
||||
#
|
||||
COPY ./ /usr/learnhouse
|
||||
|
|
|
|||
51  apps/api/poetry.lock (generated)
|
|
@ -226,17 +226,17 @@ typecheck = ["mypy"]
|
|||
|
||||
[[package]]
|
||||
name = "boto3"
|
||||
version = "1.34.17"
|
||||
version = "1.34.18"
|
||||
description = "The AWS SDK for Python"
|
||||
optional = false
|
||||
python-versions = ">= 3.8"
|
||||
files = [
|
||||
{file = "boto3-1.34.17-py3-none-any.whl", hash = "sha256:1efc02be786884034d503d59c018cf7650d0cff9fcb37cd2eb49b802a6fe6111"},
|
||||
{file = "boto3-1.34.17.tar.gz", hash = "sha256:8ca248cc84e7e859e4e276eb9c4309fa01a3e58473bf48d6c33448be870c2bb8"},
|
||||
{file = "boto3-1.34.18-py3-none-any.whl", hash = "sha256:ae7cfdf45f4dfd33bd3e84e36afcfbf0517e64a32e647989a068f34d053572b8"},
|
||||
{file = "boto3-1.34.18.tar.gz", hash = "sha256:5e38ca63007e903a7efe0a1751a0374d287b50d7bc148b9d3d495cdf74a0b712"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
botocore = ">=1.34.17,<1.35.0"
|
||||
botocore = ">=1.34.18,<1.35.0"
|
||||
jmespath = ">=0.7.1,<2.0.0"
|
||||
s3transfer = ">=0.10.0,<0.11.0"
|
||||
|
||||
|
|
@ -245,13 +245,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
|
|||
|
||||
[[package]]
|
||||
name = "botocore"
|
||||
version = "1.34.17"
|
||||
version = "1.34.18"
|
||||
description = "Low-level, data-driven core of boto 3."
|
||||
optional = false
|
||||
python-versions = ">= 3.8"
|
||||
files = [
|
||||
{file = "botocore-1.34.17-py3-none-any.whl", hash = "sha256:7272c39032c6f1d62781e4c8445d9a1d9140c2bf52ba7ee66bf6db559c4b2427"},
|
||||
{file = "botocore-1.34.17.tar.gz", hash = "sha256:e48a662f3a6919219276b55085e8f73c3347966675f55e9d448be30cf79678ee"},
|
||||
{file = "botocore-1.34.18-py3-none-any.whl", hash = "sha256:2067d8385c11b7cf2d336227d8fa5aea632fe61afbadb3168dc169dcc13d8c3e"},
|
||||
{file = "botocore-1.34.18.tar.gz", hash = "sha256:85a77e72560a45b0dfdad94f92f5e114c82be07a51bb2d19dd310dab8be158cf"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -1362,13 +1362,13 @@ extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.
|
|||
|
||||
[[package]]
|
||||
name = "langchain-core"
|
||||
version = "0.1.9"
|
||||
version = "0.1.10"
|
||||
description = "Building applications with LLMs through composability"
|
||||
optional = false
|
||||
python-versions = ">=3.8.1,<4.0"
|
||||
files = [
|
||||
{file = "langchain_core-0.1.9-py3-none-any.whl", hash = "sha256:1dd45aec185ce3afb1c19fb2e88cdbc19fafa7ae929d8107799a7c82ef69ea9f"},
|
||||
{file = "langchain_core-0.1.9.tar.gz", hash = "sha256:4b51fdbdbc06027c26ea89a6da809cae2e404c9daa95dc6c10e3eae383d8ea6a"},
|
||||
{file = "langchain_core-0.1.10-py3-none-any.whl", hash = "sha256:d89952f6d0766cfc88d9f1e25b84d56f8d7bd63a45ad8ec1a9a038c9b49df16d"},
|
||||
{file = "langchain_core-0.1.10.tar.gz", hash = "sha256:3c9e1383264c102fcc6f865700dbb9416c4931a25d0ac2195f6311c6b867aa17"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -1384,15 +1384,32 @@ tenacity = ">=8.1.0,<9.0.0"
|
|||
[package.extras]
|
||||
extended-testing = ["jinja2 (>=3,<4)"]
|
||||
|
||||
[[package]]
|
||||
name = "langchain-openai"
|
||||
version = "0.0.2.post1"
|
||||
description = "An integration package connecting OpenAI and LangChain"
|
||||
optional = false
|
||||
python-versions = ">=3.8.1,<4.0"
|
||||
files = [
|
||||
{file = "langchain_openai-0.0.2.post1-py3-none-any.whl", hash = "sha256:ba468b94c23da9d8ccefe5d5a3c1c65b4b9702292523e53acc689a9110022e26"},
|
||||
{file = "langchain_openai-0.0.2.post1.tar.gz", hash = "sha256:f8e78db4a663feeac71d9f036b9422406c199ea3ef4c97d99ff392c93530e073"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
langchain-core = ">=0.1.7,<0.2"
|
||||
numpy = ">=1,<2"
|
||||
openai = ">=1.6.1,<2.0.0"
|
||||
tiktoken = ">=0.5.2,<0.6.0"
|
||||
|
||||
[[package]]
|
||||
name = "langsmith"
|
||||
version = "0.0.79"
|
||||
version = "0.0.80"
|
||||
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
|
||||
optional = false
|
||||
python-versions = ">=3.8.1,<4.0"
|
||||
files = [
|
||||
{file = "langsmith-0.0.79-py3-none-any.whl", hash = "sha256:be0374e913c36d9f6a13dd6b6e20a506066d5a0f3abfd476f9cf9e0b086ed744"},
|
||||
{file = "langsmith-0.0.79.tar.gz", hash = "sha256:d32639ccd18a92533b302f6f482255619afc8eb007fff91e37ee699d947c5e29"},
|
||||
{file = "langsmith-0.0.80-py3-none-any.whl", hash = "sha256:dee1c6ef9e8241b82a8851926624269954d0ff8e22d82e32e73455f387f4e245"},
|
||||
{file = "langsmith-0.0.80.tar.gz", hash = "sha256:6d22ee07eb41c65b3f5166b20041a026714952497d9e80d5be6879d3a5c14d84"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -2013,13 +2030,13 @@ sympy = "*"
|
|||
|
||||
[[package]]
|
||||
name = "openai"
|
||||
version = "1.7.1"
|
||||
version = "1.7.2"
|
||||
description = "The official Python library for the openai API"
|
||||
optional = false
|
||||
python-versions = ">=3.7.1"
|
||||
files = [
|
||||
{file = "openai-1.7.1-py3-none-any.whl", hash = "sha256:e52ad7ea015331edc584e6e9c98741c819d7ffbbd2ecc50bf1f55c33f9cb3f77"},
|
||||
{file = "openai-1.7.1.tar.gz", hash = "sha256:7556e6aa30e20254b1ad68de49bb5ef4d8106bfac5e8a78abdc1daa911fbb1fb"},
|
||||
{file = "openai-1.7.2-py3-none-any.whl", hash = "sha256:8f41b90a762f5fd9d182b45851041386fed94c8ad240a70abefee61a68e0ef53"},
|
||||
{file = "openai-1.7.2.tar.gz", hash = "sha256:c73c78878258b07f1b468b0602c6591f25a1478f49ecb90b9bd44b7cc80bce73"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -4424,4 +4441,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
|
|||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = "^3.11"
|
||||
content-hash = "05aa4db63c592f8c68e48c3f9aa71e7d376852faaaef9a95c0d0ae74d848bae0"
|
||||
content-hash = "76237f0e04218f9ca9a2593ccf952452bd6d45657066feec87373279fb7fe6a2"
|
||||
|
|
|
|||
|
|
@ -38,6 +38,8 @@ sentence-transformers = "^2.2.2"
|
|||
python-dotenv = "^1.0.0"
|
||||
redis = "^5.0.1"
|
||||
langchain-community = "^0.0.11"
|
||||
langchain-openai = "^0.0.2.post1"
|
||||
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ sentry-sdk[fastapi]
|
|||
pydantic[email]>=1.8.0,<2.0.0
|
||||
langchain==0.1.0
|
||||
langchain-community
|
||||
langchain-openai
|
||||
tiktoken
|
||||
openai
|
||||
chromadb
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,4 @@
from json import JSONEncoder
import json
from typing import Literal, Optional
from click import Option
from pydantic import BaseModel
from sqlalchemy import JSON, BigInteger, Column, ForeignKey
from sqlmodel import Field, SQLModel

@@ -21,6 +18,7 @@ class AIEnabledFeatures(BaseModel):

class AIConfig(BaseModel):
    enabled : bool = True
    limits: AILimitsSettings = AILimitsSettings()
    embeddings: Literal[
        "text-embedding-ada-002", "all-MiniLM-L6-v2"
|
||||
|
|
|
|||
|
|
@@ -29,6 +29,6 @@ class OrganizationCreate(OrganizationBase):
class OrganizationRead(OrganizationBase):
    id: int
    org_uuid: str
    config: OrganizationConfig | dict
    config: Optional[OrganizationConfig | dict]
    creation_date: str
    update_date: str
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
from typing import List
|
||||
from fastapi import APIRouter, Depends, Request, UploadFile
|
||||
from sqlmodel import Session
|
||||
from src.db.organization_config import OrganizationConfigBase
|
||||
from src.db.users import PublicUser
|
||||
from src.db.organizations import (
|
||||
Organization,
|
||||
|
|
@ -12,6 +13,7 @@ from src.core.events.database import get_db_session
|
|||
from src.security.auth import get_current_user
|
||||
from src.services.orgs.orgs import (
|
||||
create_org,
|
||||
create_org_with_config,
|
||||
delete_org,
|
||||
get_organization,
|
||||
get_organization_by_slug,
|
||||
|
|
@ -37,6 +39,23 @@ async def api_create_org(
|
|||
return await create_org(request, org_object, current_user, db_session)
|
||||
|
||||
|
||||
# Temporary pre-alpha code
|
||||
@router.post("/withconfig/")
|
||||
async def api_create_org_withconfig(
|
||||
request: Request,
|
||||
org_object: OrganizationCreate,
|
||||
config_object: OrganizationConfigBase,
|
||||
current_user: PublicUser = Depends(get_current_user),
|
||||
db_session: Session = Depends(get_db_session),
|
||||
) -> OrganizationRead:
|
||||
"""
|
||||
Create new organization
|
||||
"""
|
||||
return await create_org_with_config(
|
||||
request, org_object, current_user, db_session, config_object
|
||||
)
|
||||
|
||||
|
||||
@router.get("/{org_id}")
|
||||
async def api_get_org(
|
||||
request: Request,
|
||||
|
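Because the new route above declares two Pydantic body parameters, FastAPI expects them embedded in the request body under their parameter names. A rough client-side sketch; the base URL, router prefix, and authentication are assumptions not taken from this diff, and the payload fields are placeholders:

import requests

# Hypothetical call to the temporary pre-alpha /withconfig/ route.
payload = {
    "org_object": {"slug": "acme"},  # plus the remaining OrganizationCreate fields (not shown here)
    "config_object": {},             # an OrganizationConfigBase payload, e.g. carrying the AIConfig block above
}
resp = requests.post("http://localhost:8000/orgs/withconfig/", json=payload)
print(resp.json())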
|
@ -110,7 +129,7 @@ async def api_update_org(
|
|||
"""
|
||||
Update Org by ID
|
||||
"""
|
||||
return await update_org(request, org_object,org_id, current_user, db_session)
|
||||
return await update_org(request, org_object, org_id, current_user, db_session)
|
||||
|
||||
|
||||
@router.delete("/{org_id}")
|
||||
|
|
|
|||
|
|
@ -2,6 +2,9 @@ from uuid import uuid4
|
|||
from fastapi import Depends, HTTPException, Request
|
||||
from requests import session
|
||||
from sqlmodel import Session, select
|
||||
from src.db.organization_config import OrganizationConfig
|
||||
from src.db.organizations import Organization
|
||||
from src.services.ai.utils import check_limits_and_config, count_ai_ask
|
||||
from src.db.courses import Course, CourseRead
|
||||
from src.core.events.database import get_db_session
|
||||
from src.db.users import PublicUser
|
||||
|
|
@ -29,6 +32,7 @@ def ai_start_activity_chat_session(
|
|||
"""
|
||||
Start a new AI Chat session with a Course Activity
|
||||
"""
|
||||
|
||||
# Get the Activity
|
||||
statement = select(Activity).where(
|
||||
Activity.activity_uuid == chat_session_object.activity_uuid
|
||||
|
|
@ -46,6 +50,14 @@ def ai_start_activity_chat_session(
|
|||
course = db_session.exec(statement).first()
|
||||
course = CourseRead.from_orm(course)
|
||||
|
||||
# Get the Organization
|
||||
statement = select(Organization).where(Organization.id == course.org_id)
|
||||
org = db_session.exec(statement).first()
|
||||
|
||||
# Check limits and usage
|
||||
check_limits_and_config(db_session, org) # type: ignore
|
||||
count_ai_ask(org, "increment") # type: ignore
|
||||
|
||||
if not activity:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
|
|
@ -61,28 +73,48 @@ def ai_start_activity_chat_session(
|
|||
structured, course, activity
|
||||
)
|
||||
|
||||
# Get Activity Organization
|
||||
statement = select(Organization).where(Organization.id == course.org_id)
|
||||
org = db_session.exec(statement).first()
|
||||
|
||||
# Get Organization Config
|
||||
statement = select(OrganizationConfig).where(
|
||||
OrganizationConfig.org_id == org.id # type: ignore
|
||||
)
|
||||
result = db_session.exec(statement)
|
||||
org_config = result.first()
|
||||
|
||||
org_config = OrganizationConfig.from_orm(org_config)
|
||||
embeddings = org_config.config["AIConfig"]["embeddings"]
|
||||
ai_model = org_config.config["AIConfig"]["ai_model"]
|
||||
|
||||
chat_session = get_chat_session_history()
|
||||
|
||||
message = "You are a helpful Education Assistant, and you are helping a student with the associated Course. "
|
||||
message += "Use the available tools to get context about this question even if the question is not specific enough."
|
||||
message += "For context, this is the Course name :"
|
||||
message += course.name
|
||||
message += " and this is the Lecture name :"
|
||||
message += activity.name
|
||||
message += "."
|
||||
message += "Use your knowledge to help the student if the context is not enough."
|
||||
|
||||
response = ask_ai(
|
||||
chat_session_object.message,
|
||||
chat_session['message_history'],
|
||||
chat_session["message_history"],
|
||||
ai_friendly_text,
|
||||
"You are a helpful Education Assistant, and you are helping a student with the associated Course. "
|
||||
"Use the available tools to get context about this question even if the question is not specific enough."
|
||||
"For context, this is the Course name :"
|
||||
+ course.name
|
||||
+ " and this is the Lecture name :"
|
||||
+ activity.name
|
||||
+ "."
|
||||
"Use your knowledge to help the student.",
|
||||
message,
|
||||
embeddings,
|
||||
ai_model,
|
||||
)
|
||||
|
||||
return ActivityAIChatSessionResponse(
|
||||
aichat_uuid=chat_session['aichat_uuid'],
|
||||
aichat_uuid=chat_session["aichat_uuid"],
|
||||
activity_uuid=activity.activity_uuid,
|
||||
message=response["output"],
|
||||
)
|
||||
|
||||
|
||||
def ai_send_activity_chat_message(
|
||||
request: Request,
|
||||
chat_session_object: SendActivityAIChatMessage,
|
||||
|
|
@ -109,6 +141,14 @@ def ai_send_activity_chat_message(
|
|||
course = db_session.exec(statement).first()
|
||||
course = CourseRead.from_orm(course)
|
||||
|
||||
# Get the Organization
|
||||
statement = select(Organization).where(Organization.id == course.org_id)
|
||||
org = db_session.exec(statement).first()
|
||||
|
||||
# Check limits and usage
|
||||
check_limits_and_config(db_session, org) # type: ignore
|
||||
count_ai_ask(org, "increment") # type: ignore
|
||||
|
||||
if not activity:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
|
|
@ -116,7 +156,7 @@ def ai_send_activity_chat_message(
|
|||
)
|
||||
|
||||
# Get Activity Content Blocks
|
||||
content = activity.content
|
||||
content = activity.content
|
||||
|
||||
# Serialize Activity Content Blocks to a text comprehensible by the AI
|
||||
structured = structure_activity_content_by_type(content)
|
||||
|
|
@ -124,24 +164,43 @@ def ai_send_activity_chat_message(
|
|||
structured, course, activity
|
||||
)
|
||||
|
||||
# Get Activity Organization
|
||||
statement = select(Organization).where(Organization.id == course.org_id)
|
||||
org = db_session.exec(statement).first()
|
||||
|
||||
# Get Organization Config
|
||||
statement = select(OrganizationConfig).where(
|
||||
OrganizationConfig.org_id == org.id # type: ignore
|
||||
)
|
||||
result = db_session.exec(statement)
|
||||
org_config = result.first()
|
||||
|
||||
org_config = OrganizationConfig.from_orm(org_config)
|
||||
embeddings = org_config.config["AIConfig"]["embeddings"]
|
||||
ai_model = org_config.config["AIConfig"]["ai_model"]
|
||||
|
||||
chat_session = get_chat_session_history(chat_session_object.aichat_uuid)
|
||||
|
||||
message = "You are a helpful Education Assistant, and you are helping a student with the associated Course. "
|
||||
message += "Use the available tools to get context about this question even if the question is not specific enough."
|
||||
message += "For context, this is the Course name :"
|
||||
message += course.name
|
||||
message += " and this is the Lecture name :"
|
||||
message += activity.name
|
||||
message += "."
|
||||
message += "Use your knowledge to help the student if the context is not enough."
|
||||
|
||||
response = ask_ai(
|
||||
chat_session_object.message,
|
||||
chat_session['message_history'],
|
||||
chat_session["message_history"],
|
||||
ai_friendly_text,
|
||||
"You are a helpful Education Assistant, and you are helping a student with the associated Course. "
|
||||
"Use the available tools to get context about this question even if the question is not specific enough."
|
||||
"For context, this is the Course name :"
|
||||
+ course.name
|
||||
+ " and this is the Lecture name :"
|
||||
+ activity.name
|
||||
+ "."
|
||||
"Use your knowledge to help the student if the context is not enough.",
|
||||
message,
|
||||
embeddings,
|
||||
ai_model,
|
||||
)
|
||||
|
||||
return ActivityAIChatSessionResponse(
|
||||
aichat_uuid=chat_session['aichat_uuid'],
|
||||
aichat_uuid=chat_session["aichat_uuid"],
|
||||
activity_uuid=activity.activity_uuid,
|
||||
message=response["output"],
|
||||
)
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ from langchain_core.messages import SystemMessage
|
|||
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
|
||||
AgentTokenBufferMemory,
|
||||
)
|
||||
from langchain_openai import OpenAIEmbeddings
|
||||
from langchain_community.chat_models import ChatOpenAI
|
||||
from langchain.agents.agent_toolkits import (
|
||||
create_retriever_tool,
|
||||
|
|
@ -31,6 +32,8 @@ def ask_ai(
|
|||
message_history,
|
||||
text_reference: str,
|
||||
message_for_the_prompt: str,
|
||||
embedding_model_name: str,
|
||||
openai_model_name: str,
|
||||
):
|
||||
# Get API Keys
|
||||
LH_CONFIG = get_learnhouse_config()
|
||||
|
|
@ -41,8 +44,20 @@ def ask_ai(
|
|||
documents = text_splitter.create_documents([text_reference])
|
||||
texts = text_splitter.split_documents(documents)
|
||||
|
||||
# create the open-source embedding function
|
||||
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
|
||||
embedding_models = {
|
||||
"all-MiniLM-L6-v2": SentenceTransformerEmbeddings,
|
||||
"text-embedding-ada-002": OpenAIEmbeddings,
|
||||
}
|
||||
|
||||
embedding_function = None
|
||||
|
||||
if embedding_model_name in embedding_models:
|
||||
if embedding_model_name == "text-embedding-ada-002":
|
||||
embedding_function = embedding_models[embedding_model_name](model=embedding_model_name, api_key=openai_api_key)
|
||||
if embedding_model_name == "all-MiniLM-L6-v2":
|
||||
embedding_function = embedding_models[embedding_model_name](model_name=embedding_model_name)
|
||||
else:
|
||||
embedding_function = embedding_models[embedding_model_name](model_name=embedding_model_name)
|
||||
|
||||
# load it into Chroma and use it as a retriever
|
||||
db = Chroma.from_documents(texts, embedding_function)
|
||||
|
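The block above picks the embedding backend from the organization's embeddings setting. Condensed, the intended selection is roughly the following (same constructors and keyword arguments as in the hunk):

# Condensed sketch of the intended mapping from org config to embedding backend.
if embedding_model_name == "text-embedding-ada-002":
    # OpenAI embeddings take `model` and need the API key
    embedding_function = OpenAIEmbeddings(model=embedding_model_name, api_key=openai_api_key)
else:
    # sentence-transformers models take `model_name`
    embedding_function = SentenceTransformerEmbeddings(model_name=embedding_model_name)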
|
@ -53,12 +68,14 @@ def ask_ai(
|
|||
)
|
||||
tools = [tool]
|
||||
|
||||
llm = ChatOpenAI(temperature=0, api_key=openai_api_key, model_name="gpt-3.5-turbo")
|
||||
llm = ChatOpenAI(
|
||||
temperature=0, api_key=openai_api_key, model_name=openai_model_name
|
||||
)
|
||||
|
||||
memory_key = "history"
|
||||
|
||||
memory = AgentTokenBufferMemory(
|
||||
memory_key=memory_key, llm=llm, chat_memory=message_history, max_tokens=1000
|
||||
memory_key=memory_key, llm=llm, chat_memory=message_history, max_token_limit=1000
|
||||
)
|
||||
|
||||
system_message = SystemMessage(content=(message_for_the_prompt))
|
||||
|
|
|
|||
114  apps/api/src/services/ai/utils.py (new file)
|
|
@ -0,0 +1,114 @@
|
|||
from typing import Literal
|
||||
import redis
|
||||
from fastapi import HTTPException
|
||||
from sqlmodel import Session, select
|
||||
from config.config import get_learnhouse_config
|
||||
from src.db.organization_config import OrganizationConfig
|
||||
from src.db.organizations import Organization
|
||||
|
||||
|
||||
def count_ai_ask(
|
||||
organization: Organization,
|
||||
operation: Literal["increment", "decrement"],
|
||||
):
|
||||
"""
|
||||
Count the number of AI asks
|
||||
"""
|
||||
|
||||
LH_CONFIG = get_learnhouse_config()
|
||||
redis_conn_string = LH_CONFIG.redis_config.redis_connection_string
|
||||
|
||||
if not redis_conn_string:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Redis connection string not found",
|
||||
)
|
||||
|
||||
# Connect to Redis
|
||||
r = redis.Redis.from_url(redis_conn_string)
|
||||
|
||||
if not r:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Could not connect to Redis",
|
||||
)
|
||||
|
||||
# Get the number of AI asks
|
||||
ai_asks = r.get(f"ai_asks:{organization.org_uuid}")
|
||||
|
||||
if ai_asks is None:
|
||||
ai_asks = 0
|
||||
|
||||
# Increment or decrement the number of AI asks
|
||||
if operation == "increment":
|
||||
ai_asks = int(ai_asks) + 1
|
||||
elif operation == "decrement":
|
||||
ai_asks = int(ai_asks) - 1
|
||||
|
||||
# Update the number of AI asks
|
||||
r.set(f"ai_asks:{organization.org_uuid}", ai_asks)
|
||||
|
||||
# Set the expiration time to 30 days
|
||||
r.expire(f"ai_asks:{organization.org_uuid}", 2592000)
|
||||
|
||||
|
||||
def check_limits_and_config(db_session: Session, organization: Organization):
|
||||
"""
|
||||
Check the limits and config of an Organization
|
||||
"""
|
||||
|
||||
# Get the Organization Config
|
||||
statement = select(OrganizationConfig).where(
|
||||
OrganizationConfig.org_id == organization.id
|
||||
)
|
||||
result = db_session.exec(statement)
|
||||
org_config = result.first()
|
||||
|
||||
if org_config is None:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail="Organization has no config",
|
||||
)
|
||||
|
||||
# Check if the Organizations has AI enabled
|
||||
if org_config.config["AIConfig"]["enabled"] == False:
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Organization has AI disabled",
|
||||
)
|
||||
|
||||
# Check if the Organization has Limits enabled and if the max_asks limit has been reached
|
||||
if org_config.config["AIConfig"]["limits"]["limits_enabled"] == True:
|
||||
LH_CONFIG = get_learnhouse_config()
|
||||
redis_conn_string = LH_CONFIG.redis_config.redis_connection_string
|
||||
|
||||
if not redis_conn_string:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Redis connection string not found",
|
||||
)
|
||||
|
||||
# Connect to Redis
|
||||
r = redis.Redis.from_url(redis_conn_string)
|
||||
|
||||
if not r:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Could not connect to Redis",
|
||||
)
|
||||
|
||||
# Get the number of AI asks
|
||||
ai_asks = r.get(f"ai_asks:{organization.org_uuid}")
|
||||
|
||||
# Get a number of AI asks
|
||||
if ai_asks is None:
|
||||
ai_asks = 0
|
||||
else:
|
||||
ai_asks = int(ai_asks)
|
||||
|
||||
# Check if the Number of asks is less than the max_asks limit
|
||||
if org_config.config["AIConfig"]["limits"]["max_asks"] <= ai_asks:
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Organization has reached the max number of AI asks",
|
||||
)
|
||||
|
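These two helpers are what the AI chat endpoints above now call before answering. A minimal usage sketch; the wrapper function name is hypothetical, while the lookup and the two calls mirror the chat service in this diff:

from sqlmodel import Session, select
from src.db.organizations import Organization
from src.services.ai.utils import check_limits_and_config, count_ai_ask

def guard_ai_request(db_session: Session, org_id: int) -> None:
    # Resolve the organization that owns the course/activity being asked about.
    org = db_session.exec(select(Organization).where(Organization.id == org_id)).first()
    # Raises 404 if the org has no config, 403 if AI is disabled or max_asks is exceeded.
    check_limits_and_config(db_session, org)
    # Bumps the 30-day Redis counter ai_asks:<org_uuid>.
    count_ai_ask(org, "increment")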
|
@ -44,12 +44,12 @@ async def create_image_block(
|
|||
image_file,
|
||||
activity_uuid,
|
||||
block_uuid,
|
||||
["jpg", "jpeg", "png", "gif"],
|
||||
["jpg", "jpeg", "png", "gif", "webp"],
|
||||
block_type,
|
||||
org.org_uuid,
|
||||
str(course.course_uuid),
|
||||
)
|
||||
|
||||
|
||||
# create block
|
||||
block = Block(
|
||||
activity_id=activity.id if activity.id else 0,
|
||||
|
|
|
|||
|
|
@ -1,30 +1,25 @@
|
|||
from src.db.activities import ActivityRead
|
||||
from src.db.courses import CourseRead
|
||||
|
||||
|
||||
def structure_activity_content_by_type(activity):
|
||||
### Get Headings, Texts, Callouts, Answers and Paragraphs from the activity as a big list of strings (text only) and return it
|
||||
content = activity["content"]
|
||||
|
||||
# Get Headings
|
||||
headings = []
|
||||
for item in activity["content"]:
|
||||
if item["type"] == "heading":
|
||||
headings.append(item["content"][0]["text"])
|
||||
|
||||
# Get Callouts
|
||||
callouts = []
|
||||
for item in activity["content"]:
|
||||
if item["type"] == "calloutInfo":
|
||||
# Get every type of text in the callout
|
||||
text = ""
|
||||
for text_item in item["content"]:
|
||||
text += text_item["text"]
|
||||
callouts.append(text)
|
||||
|
||||
# Get Paragraphs
|
||||
paragraphs = []
|
||||
for item in activity["content"]:
|
||||
if item["type"] == "paragraph":
|
||||
paragraphs.append(item["content"][0]["text"])
|
||||
|
||||
for item in content:
|
||||
if 'content' in item:
|
||||
if item["type"] == "heading" and "text" in item["content"][0]:
|
||||
headings.append(item["content"][0]["text"])
|
||||
elif item["type"] in ["calloutInfo", "calloutWarning"] and all("text" in text_item for text_item in item["content"]):
|
||||
callouts.append(
|
||||
"".join([text_item["text"] for text_item in item["content"]])
|
||||
)
|
||||
elif item["type"] == "paragraph" and "text" in item["content"][0]:
|
||||
paragraphs.append(item["content"][0]["text"])
|
||||
|
||||
# TODO: Get Questions and Answers (if any)
|
||||
|
||||
|
|
@ -39,10 +34,14 @@ def structure_activity_content_by_type(activity):
|
|||
# Add Paragraphs
|
||||
data_array.append({"Paragraphs": paragraphs})
|
||||
|
||||
print(data_array)
|
||||
|
||||
return data_array
|
||||
|
||||
|
||||
def serialize_activity_text_to_ai_comprehensible_text(data_array, course: CourseRead, activity: ActivityRead):
|
||||
def serialize_activity_text_to_ai_comprehensible_text(
|
||||
data_array, course: CourseRead, activity: ActivityRead
|
||||
):
|
||||
### Serialize the text to a format that is comprehensible by the AI
|
||||
|
||||
# Serialize Headings
|
||||
|
|
@ -63,9 +62,13 @@ def serialize_activity_text_to_ai_comprehensible_text(data_array, course: Course
|
|||
|
||||
# Get a text that is comprehensible by the AI
|
||||
text = (
|
||||
'Use this as a context ' +
|
||||
'This is a course about "' + course.name + '". '
|
||||
+ 'This is a lecture about "' + activity.name + '". '
|
||||
"Use this as a context "
|
||||
+ 'This is a course about "'
|
||||
+ course.name
|
||||
+ '". '
|
||||
+ 'This is a lecture about "'
|
||||
+ activity.name
|
||||
+ '". '
|
||||
'These are the headings: "'
|
||||
+ serialized_headings
|
||||
+ '" These are the callouts: "'
|
||||
|
|
|
|||
|
|
@ -165,6 +165,7 @@ async def create_org(
|
|||
active=True,
|
||||
),
|
||||
AIConfig=AIConfig(
|
||||
enabled=False,
|
||||
limits=AILimitsSettings(
|
||||
limits_enabled=False,
|
||||
max_asks=0,
|
||||
|
|
@ -210,6 +211,87 @@ async def create_org(
|
|||
return org
|
||||
|
||||
|
||||
# Temporary pre-alpha code
|
||||
async def create_org_with_config(
|
||||
request: Request,
|
||||
org_object: OrganizationCreate,
|
||||
current_user: PublicUser | AnonymousUser,
|
||||
db_session: Session,
|
||||
submitted_config: OrganizationConfigBase,
|
||||
):
|
||||
statement = select(Organization).where(Organization.slug == org_object.slug)
|
||||
result = db_session.exec(statement)
|
||||
|
||||
org = result.first()
|
||||
|
||||
if org:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail="Organization already exists",
|
||||
)
|
||||
|
||||
org = Organization.from_orm(org_object)
|
||||
|
||||
if isinstance(current_user, AnonymousUser):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_409_CONFLICT,
|
||||
detail="You should be logged in to be able to achieve this action",
|
||||
)
|
||||
|
||||
# Complete the org object
|
||||
org.org_uuid = f"org_{uuid4()}"
|
||||
org.creation_date = str(datetime.now())
|
||||
org.update_date = str(datetime.now())
|
||||
|
||||
db_session.add(org)
|
||||
db_session.commit()
|
||||
db_session.refresh(org)
|
||||
|
||||
# Link user to org
|
||||
user_org = UserOrganization(
|
||||
user_id=int(current_user.id),
|
||||
org_id=int(org.id if org.id else 0),
|
||||
role_id=1,
|
||||
creation_date=str(datetime.now()),
|
||||
update_date=str(datetime.now()),
|
||||
)
|
||||
|
||||
db_session.add(user_org)
|
||||
db_session.commit()
|
||||
db_session.refresh(user_org)
|
||||
|
||||
org_config = submitted_config
|
||||
|
||||
org_config = json.loads(org_config.json())
|
||||
|
||||
# OrgSettings
|
||||
org_settings = OrganizationConfig(
|
||||
org_id=int(org.id if org.id else 0),
|
||||
config=org_config,
|
||||
creation_date=str(datetime.now()),
|
||||
update_date=str(datetime.now()),
|
||||
)
|
||||
|
||||
db_session.add(org_settings)
|
||||
db_session.commit()
|
||||
db_session.refresh(org_settings)
|
||||
|
||||
# Get org config
|
||||
statement = select(OrganizationConfig).where(OrganizationConfig.org_id == org.id)
|
||||
result = db_session.exec(statement)
|
||||
|
||||
org_config = result.first()
|
||||
|
||||
if org_config is None:
|
||||
logging.error(f"Organization {org.id} has no config")
|
||||
|
||||
config = OrganizationConfig.from_orm(org_config)
|
||||
|
||||
org = OrganizationRead(**org.dict(), config=config)
|
||||
|
||||
return org
|
||||
|
||||
|
||||
async def update_org(
|
||||
request: Request,
|
||||
org_object: OrganizationUpdate,
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ import AuthenticatedClientElement from "@components/Security/AuthenticatedClient
|
|||
import { getCourseThumbnailMediaDirectory } from "@services/media/media";
|
||||
import { useOrg } from "@components/Contexts/OrgContext";
|
||||
import { CourseProvider } from "@components/Contexts/CourseContext";
|
||||
import AIActivityAsk from "@components/AI/AIActivityAsk";
|
||||
import AIActivityAsk from "@components/Objects/Activities/AI/AIActivityAsk";
|
||||
import AIChatBotProvider from "@components/Contexts/AI/AIChatBotContext";
|
||||
|
||||
interface ActivityClientProps {
|
||||
|
|
|
|||
39  apps/web/components/AI/Hooks/useGetAIFeatures.tsx (new file)
|
|
@ -0,0 +1,39 @@
|
|||
import { useOrg } from '@components/Contexts/OrgContext'
|
||||
import React from 'react'
|
||||
|
||||
interface UseGetAIFeatures {
|
||||
feature: 'editor' | 'activity_ask' | 'course_ask' | 'global_ai_ask',
|
||||
}
|
||||
|
||||
|
||||
function useGetAIFeatures(props: UseGetAIFeatures) {
|
||||
const org = useOrg() as any
|
||||
const [isEnabled, setisEnabled] = React.useState(false)
|
||||
|
||||
function checkAvailableAIFeaturesOnOrg(feature: string) {
|
||||
const config = org.config.config.AIConfig;
|
||||
if (!config.enabled) {
|
||||
console.log("AI is not enabled for this Organization.");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!config.features[feature]) {
|
||||
console.log(`Feature ${feature} is not enabled for this Organization.`);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
React.useEffect(() => {
|
||||
if (org) { // Check if org is not null or undefined
|
||||
let isEnabledStatus = checkAvailableAIFeaturesOnOrg(props.feature)
|
||||
setisEnabled(isEnabledStatus)
|
||||
}
|
||||
}, [org])
|
||||
|
||||
return isEnabled
|
||||
|
||||
}
|
||||
|
||||
export default useGetAIFeatures
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
'use client';
|
||||
import { AIMessage } from '@components/AI/AIActivityAsk';
|
||||
import { AIMessage } from '@components/Objects/Activities/AI/AIActivityAsk';
|
||||
import React, { createContext, useContext, useReducer } from 'react'
|
||||
export const AIChatBotContext = createContext(null) as any;
|
||||
export const AIChatBotDispatchContext = createContext(null) as any;
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
'use client';
|
||||
import { AIMessage } from '@components/AI/AIActivityAsk';
|
||||
import { AIMessage } from '@components/Objects/Activities/AI/AIActivityAsk';
|
||||
import React, { createContext, useContext, useReducer } from 'react'
|
||||
export const AIEditorContext = createContext(null) as any;
|
||||
export const AIEditorDispatchContext = createContext(null) as any;
|
||||
|
|
|
|||
|
|
@ -8,10 +8,11 @@ import Image from 'next/image';
|
|||
import { send } from 'process';
|
||||
import learnhouseAI_icon from "public/learnhouse_ai_simple.png";
|
||||
import learnhouseAI_logo_black from "public/learnhouse_ai_black_logo.png";
|
||||
import React, { useEffect, useRef } from 'react'
|
||||
import React, { use, useEffect, useRef } from 'react'
|
||||
import { AIChatBotStateTypes, useAIChatBot, useAIChatBotDispatch } from '@components/Contexts/AI/AIChatBotContext';
|
||||
import FeedbackModal from '@components/Objects/Modals/Feedback/Feedback';
|
||||
import Modal from '@components/StyledElements/Modal/Modal';
|
||||
import useGetAIFeatures from '../../../AI/Hooks/useGetAIFeatures';
|
||||
|
||||
|
||||
type AIActivityAskProps = {
|
||||
|
|
@ -20,25 +21,38 @@ type AIActivityAskProps = {
|
|||
|
||||
|
||||
function AIActivityAsk(props: AIActivityAskProps) {
|
||||
|
||||
const is_ai_feature_enabled = useGetAIFeatures({ feature: 'activity_ask' });
|
||||
const [isButtonAvailable, setIsButtonAvailable] = React.useState(false);
|
||||
const dispatchAIChatBot = useAIChatBotDispatch() as any;
|
||||
|
||||
useEffect(() => {
|
||||
if (is_ai_feature_enabled) {
|
||||
setIsButtonAvailable(true);
|
||||
}
|
||||
}
|
||||
, [is_ai_feature_enabled]);
|
||||
|
||||
return (
|
||||
<div className=''>
|
||||
<ActivityChatMessageBox activity={props.activity} />
|
||||
<div
|
||||
onClick={() => dispatchAIChatBot({ type: 'setIsModalOpen' })}
|
||||
style={{
|
||||
background: 'conic-gradient(from 32deg at 53.75% 50%, rgb(35, 40, 93) 4deg, rgba(20, 0, 52, 0.95) 59deg, rgba(164, 45, 238, 0.88) 281deg)',
|
||||
}}
|
||||
className="rounded-full px-5 drop-shadow-md flex items-center space-x-1.5 p-2.5 text-sm text-white hover:cursor-pointer transition delay-150 duration-300 ease-in-out hover:scale-105">
|
||||
{" "}
|
||||
<i>
|
||||
<Image className='outline outline-1 outline-neutral-200/20 rounded-md' width={20} src={learnhouseAI_icon} alt="" />
|
||||
</i>{" "}
|
||||
<i className="not-italic text-xs font-bold">Ask AI</i>
|
||||
</div>
|
||||
</div>
|
||||
<>
|
||||
{isButtonAvailable && (
|
||||
<div >
|
||||
<ActivityChatMessageBox activity={props.activity} />
|
||||
<div
|
||||
onClick={() => dispatchAIChatBot({ type: 'setIsModalOpen' })}
|
||||
style={{
|
||||
background: 'conic-gradient(from 32deg at 53.75% 50%, rgb(35, 40, 93) 4deg, rgba(20, 0, 52, 0.95) 59deg, rgba(164, 45, 238, 0.88) 281deg)',
|
||||
}}
|
||||
className="rounded-full px-5 drop-shadow-md flex items-center space-x-1.5 p-2.5 text-sm text-white hover:cursor-pointer transition delay-150 duration-300 ease-in-out hover:scale-105">
|
||||
{" "}
|
||||
<i>
|
||||
<Image className='outline outline-1 outline-neutral-200/20 rounded-md' width={20} src={learnhouseAI_icon} alt="" />
|
||||
</i>{" "}
|
||||
<i className="not-italic text-xs font-bold">Ask AI</i>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@ -7,6 +7,7 @@ import { BubbleMenu } from '@tiptap/react';
|
|||
import ToolTip from '@components/StyledElements/Tooltip/Tooltip';
|
||||
import { AIChatBotStateTypes, useAIChatBot, useAIChatBotDispatch } from '@components/Contexts/AI/AIChatBotContext';
|
||||
import { sendActivityAIChatMessage, startActivityAIChatSession } from '@services/ai/ai';
|
||||
import useGetAIFeatures from '../../../../AI/Hooks/useGetAIFeatures';
|
||||
|
||||
|
||||
|
||||
|
|
@ -16,23 +17,35 @@ type AICanvaToolkitProps = {
|
|||
}
|
||||
|
||||
function AICanvaToolkit(props: AICanvaToolkitProps) {
|
||||
const is_ai_feature_enabled = useGetAIFeatures({ feature: 'activity_ask' });
|
||||
const [isBubbleMenuAvailable, setIsButtonAvailable] = React.useState(false);
|
||||
|
||||
React.useEffect(() => {
|
||||
if (is_ai_feature_enabled) {
|
||||
setIsButtonAvailable(true);
|
||||
}
|
||||
}, [is_ai_feature_enabled])
|
||||
|
||||
|
||||
return (
|
||||
<BubbleMenu className="w-fit" tippyOptions={{ duration: 100 }} editor={props.editor}>
|
||||
<div style={{ background: 'linear-gradient(0deg, rgba(0, 0, 0, 0.2) 0%, rgba(0, 0, 0, 0.2) 100%), radial-gradient(105.16% 105.16% at 50% -5.16%, rgba(255, 255, 255, 0.18) 0%, rgba(0, 0, 0, 0) 100%), rgba(2, 1, 25, 0.98)' }}
|
||||
className='py-1 h-10 px-2 w-max text-white rounded-xl shadow-md cursor-pointer flex items-center space-x-2 antialiased'
|
||||
>
|
||||
<div className='flex w-full space-x-2 font-bold text-white/80'><Image className='outline outline-1 outline-neutral-200/10 rounded-lg' width={24} src={learnhouseAI_icon} alt="" /> <div>AI</div> </div>
|
||||
<div>
|
||||
<MoreVertical className='text-white/50' size={12} />
|
||||
<>
|
||||
{isBubbleMenuAvailable && <BubbleMenu className="w-fit" tippyOptions={{ duration: 100 }} editor={props.editor}>
|
||||
<div style={{ background: 'linear-gradient(0deg, rgba(0, 0, 0, 0.2) 0%, rgba(0, 0, 0, 0.2) 100%), radial-gradient(105.16% 105.16% at 50% -5.16%, rgba(255, 255, 255, 0.18) 0%, rgba(0, 0, 0, 0) 100%), rgba(2, 1, 25, 0.98)' }}
|
||||
className='py-1 h-10 px-2 w-max text-white rounded-xl shadow-md cursor-pointer flex items-center space-x-2 antialiased'
|
||||
>
|
||||
<div className='flex w-full space-x-2 font-bold text-white/80'><Image className='outline outline-1 outline-neutral-200/10 rounded-lg' width={24} src={learnhouseAI_icon} alt="" /> <div>AI</div> </div>
|
||||
<div>
|
||||
<MoreVertical className='text-white/50' size={12} />
|
||||
</div>
|
||||
<div className='flex space-x-2'>
|
||||
<AIActionButton editor={props.editor} activity={props.activity} label='Explain' />
|
||||
<AIActionButton editor={props.editor} activity={props.activity} label='Summarize' />
|
||||
<AIActionButton editor={props.editor} activity={props.activity} label='Translate' />
|
||||
<AIActionButton editor={props.editor} activity={props.activity} label='Examples' />
|
||||
</div>
|
||||
</div>
|
||||
<div className='flex space-x-2'>
|
||||
<AIActionButton editor={props.editor} activity={props.activity} label='Explain' />
|
||||
<AIActionButton editor={props.editor} activity={props.activity} label='Summarize' />
|
||||
<AIActionButton editor={props.editor} activity={props.activity} label='Translate' />
|
||||
<AIActionButton editor={props.editor} activity={props.activity} label='Examples' />
|
||||
</div>
|
||||
</div>
|
||||
</BubbleMenu>
|
||||
</BubbleMenu>}
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@ -45,7 +58,7 @@ function AIActionButton(props: { editor: Editor, label: string, activity: any })
|
|||
const prompt = getPrompt(label, selection);
|
||||
dispatchAIChatBot({ type: 'setIsModalOpen' });
|
||||
await sendMessage(prompt);
|
||||
|
||||
|
||||
}
|
||||
|
||||
const getTipTapEditorSelectedText = () => {
|
||||
|
|
@ -24,7 +24,7 @@ import python from 'highlight.js/lib/languages/python'
|
|||
import java from 'highlight.js/lib/languages/java'
|
||||
import { NoTextInput } from "@components/Objects/Editor/Extensions/NoTextInput/NoTextInput";
|
||||
import EditorOptionsProvider from "@components/Contexts/Editor/EditorContext";
|
||||
import AICanvaToolkit from "./Elements/AICanvaToolkit";
|
||||
import AICanvaToolkit from "./AI/AICanvaToolkit";
|
||||
|
||||
|
||||
interface Editor {
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ import { Editor } from '@tiptap/react';
|
|||
import { AIChatBotStateTypes, useAIChatBot, useAIChatBotDispatch } from '@components/Contexts/AI/AIChatBotContext';
|
||||
import { AIEditorStateTypes, useAIEditor, useAIEditorDispatch } from '@components/Contexts/AI/AIEditorContext';
|
||||
import { sendActivityAIChatMessage, startActivityAIChatSession } from '@services/ai/ai';
|
||||
import useGetAIFeatures from '@components/AI/Hooks/useGetAIFeatures';
|
||||
|
||||
type AIEditorToolkitProps = {
|
||||
editor: Editor,
|
||||
|
|
@ -22,48 +23,61 @@ type AIPromptsLabels = {
|
|||
function AIEditorToolkit(props: AIEditorToolkitProps) {
|
||||
const dispatchAIEditor = useAIEditorDispatch() as any;
|
||||
const aiEditorState = useAIEditor() as AIEditorStateTypes;
|
||||
const is_ai_feature_enabled = useGetAIFeatures({ feature: 'editor' });
|
||||
const [isToolkitAvailable, setIsToolkitAvailable] = React.useState(true);
|
||||
|
||||
React.useEffect(() => {
|
||||
if (is_ai_feature_enabled) {
|
||||
setIsToolkitAvailable(true);
|
||||
}
|
||||
}, [is_ai_feature_enabled])
|
||||
|
||||
|
||||
return (
|
||||
<AnimatePresence>
|
||||
{aiEditorState.isModalOpen && <motion.div
|
||||
initial={{ y: 20, opacity: 0.3, filter: 'blur(5px)' }}
|
||||
animate={{ y: 0, opacity: 1, filter: 'blur(0px)' }}
|
||||
exit={{ y: 50, opacity: 0, filter: 'blur(3px)' }}
|
||||
transition={{ type: "spring", bounce: 0.35, duration: 1.7, mass: 0.2, velocity: 2 }}
|
||||
className='fixed top-0 left-0 w-full h-full z-50 flex justify-center items-center '
|
||||
style={{ pointerEvents: 'none' }}
|
||||
>
|
||||
<>
|
||||
{aiEditorState.isFeedbackModalOpen && <UserFeedbackModal activity={props.activity} editor={props.editor} />}
|
||||
<div
|
||||
style={{
|
||||
pointerEvents: 'auto',
|
||||
background: 'linear-gradient(0deg, rgba(0, 0, 0, 0.2) 0%, rgba(0, 0, 0, 0.2) 100%), radial-gradient(105.16% 105.16% at 50% -5.16%, rgba(255, 255, 255, 0.18) 0%, rgba(0, 0, 0, 0) 100%), rgb(2 1 25 / 98%)'
|
||||
}}
|
||||
className="z-50 rounded-2xl max-w-screen-2xl my-10 mx-auto w-fit fixed bottom-0 left-1/2 transform -translate-x-1/2 shadow-xl ring-1 ring-inset ring-white/10 text-white p-3 flex-col-reverse backdrop-blur-md">
|
||||
<div className='flex space-x-2'>
|
||||
<div className='pr-1'>
|
||||
<div className='flex w-full space-x-2 font-bold text-white/80 items-center'>
|
||||
<Image className='outline outline-1 outline-neutral-200/20 rounded-lg' width={24} src={learnhouseAI_icon} alt="" />
|
||||
<div >AI Editor</div>
|
||||
<MoreVertical className='text-white/50' size={12} />
|
||||
</div>
|
||||
</div>
|
||||
<div className='tools flex space-x-2'>
|
||||
<AiEditorToolButton label='Writer' />
|
||||
<AiEditorToolButton label='ContinueWriting' />
|
||||
<AiEditorToolButton label='MakeLonger' />
|
||||
<>
|
||||
{isToolkitAvailable && <div className='flex space-x-2'>
|
||||
<AnimatePresence>
|
||||
{aiEditorState.isModalOpen && <motion.div
|
||||
initial={{ y: 20, opacity: 0.3, filter: 'blur(5px)' }}
|
||||
animate={{ y: 0, opacity: 1, filter: 'blur(0px)' }}
|
||||
exit={{ y: 50, opacity: 0, filter: 'blur(3px)' }}
|
||||
transition={{ type: "spring", bounce: 0.35, duration: 1.7, mass: 0.2, velocity: 2 }}
|
||||
className='fixed top-0 left-0 w-full h-full z-50 flex justify-center items-center '
|
||||
style={{ pointerEvents: 'none' }}
|
||||
>
|
||||
<>
|
||||
{aiEditorState.isFeedbackModalOpen && <UserFeedbackModal activity={props.activity} editor={props.editor} />}
|
||||
<div
|
||||
style={{
|
||||
pointerEvents: 'auto',
|
||||
background: 'linear-gradient(0deg, rgba(0, 0, 0, 0.2) 0%, rgba(0, 0, 0, 0.2) 100%), radial-gradient(105.16% 105.16% at 50% -5.16%, rgba(255, 255, 255, 0.18) 0%, rgba(0, 0, 0, 0) 100%), rgb(2 1 25 / 98%)'
|
||||
}}
|
||||
className="z-50 rounded-2xl max-w-screen-2xl my-10 mx-auto w-fit fixed bottom-0 left-1/2 transform -translate-x-1/2 shadow-xl ring-1 ring-inset ring-white/10 text-white p-3 flex-col-reverse backdrop-blur-md">
|
||||
<div className='flex space-x-2'>
|
||||
<div className='pr-1'>
|
||||
<div className='flex w-full space-x-2 font-bold text-white/80 items-center'>
|
||||
<Image className='outline outline-1 outline-neutral-200/20 rounded-lg' width={24} src={learnhouseAI_icon} alt="" />
|
||||
<div >AI Editor</div>
|
||||
<MoreVertical className='text-white/50' size={12} />
|
||||
</div>
|
||||
</div>
|
||||
<div className='tools flex space-x-2'>
|
||||
<AiEditorToolButton label='Writer' />
|
||||
<AiEditorToolButton label='ContinueWriting' />
|
||||
<AiEditorToolButton label='MakeLonger' />
|
||||
|
||||
<AiEditorToolButton label='Translate' />
|
||||
</div>
|
||||
<div className='flex space-x-2 items-center'>
|
||||
<X onClick={() => Promise.all([dispatchAIEditor({ type: 'setIsModalClose' }), dispatchAIEditor({ type: 'setIsFeedbackModalClose' })])} size={20} className='text-white/50 hover:cursor-pointer bg-white/10 p-1 rounded-full items-center' />
|
||||
</div>
|
||||
</div>
|
||||
</div></>
|
||||
</motion.div>}
|
||||
</AnimatePresence>
|
||||
</div>}
|
||||
</>
|
||||
|
||||
<AiEditorToolButton label='Translate' />
|
||||
</div>
|
||||
<div className='flex space-x-2 items-center'>
|
||||
<X onClick={() => Promise.all([dispatchAIEditor({ type: 'setIsModalClose' }), dispatchAIEditor({ type: 'setIsFeedbackModalClose' })])} size={20} className='text-white/50 hover:cursor-pointer bg-white/10 p-1 rounded-full items-center' />
|
||||
</div>
|
||||
</div>
|
||||
</div></>
|
||||
</motion.div>}
|
||||
</AnimatePresence>
|
||||
)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -39,10 +39,9 @@ import html from 'highlight.js/lib/languages/xml'
|
|||
import python from 'highlight.js/lib/languages/python'
|
||||
import java from 'highlight.js/lib/languages/java'
|
||||
import { CourseProvider } from "@components/Contexts/CourseContext";
|
||||
import { OrgProvider } from "@components/Contexts/OrgContext";
|
||||
import { useSession } from "@components/Contexts/SessionContext";
|
||||
import AIEditorTools from "./AI/AIEditorToolkit";
|
||||
import AIEditorToolkit from "./AI/AIEditorToolkit";
|
||||
import useGetAIFeatures from "@components/AI/Hooks/useGetAIFeatures";
|
||||
|
||||
|
||||
interface Editor {
|
||||
|
|
@ -59,6 +58,14 @@ function Editor(props: Editor) {
|
|||
const session = useSession() as any;
|
||||
const dispatchAIEditor = useAIEditorDispatch() as any;
|
||||
const aiEditorState = useAIEditor() as AIEditorStateTypes;
|
||||
const is_ai_feature_enabled = useGetAIFeatures({ feature: 'editor' });
|
||||
const [isButtonAvailable, setIsButtonAvailable] = React.useState(false);
|
||||
|
||||
React.useEffect(() => {
|
||||
if (is_ai_feature_enabled) {
|
||||
setIsButtonAvailable(true);
|
||||
}
|
||||
}, [is_ai_feature_enabled])
|
||||
|
||||
// remove course_ from course_uuid
|
||||
const course_uuid = props.course.course_uuid.substring(7);
|
||||
|
|
@ -137,7 +144,6 @@ function Editor(props: Editor) {
|
|||
|
||||
return (
|
||||
<Page>
|
||||
<OrgProvider orgslug={props.org?.slug}>
|
||||
<CourseProvider courseuuid={props.course.course_uuid}>
|
||||
<motion.div
|
||||
initial={{ opacity: 0, scale: 0.98 }}
|
||||
|
|
@ -172,7 +178,7 @@ function Editor(props: Editor) {
|
|||
<EditorUsersSection className="space-x-2">
|
||||
<div>
|
||||
<div className="transition-all ease-linear text-teal-100 rounded-md hover:cursor-pointer" >
|
||||
<div
|
||||
{isButtonAvailable && <div
|
||||
onClick={() => dispatchAIEditor({ type: aiEditorState.isModalOpen ? 'setIsModalClose' : 'setIsModalOpen' })}
|
||||
style={{
|
||||
background: 'conic-gradient(from 32deg at 53.75% 50%, rgb(35, 40, 93) 4deg, rgba(20, 0, 52, 0.95) 59deg, rgba(164, 45, 238, 0.88) 281deg)',
|
||||
|
|
@ -183,7 +189,7 @@ function Editor(props: Editor) {
|
|||
<Image className='' width={20} src={learnhouseAI_icon} alt="" />
|
||||
</i>{" "}
|
||||
<i className="not-italic text-xs font-bold">AI Editor</i>
|
||||
</div>
|
||||
</div>}
|
||||
</div>
|
||||
</div>
|
||||
<DividerVerticalIcon style={{ marginTop: "auto", marginBottom: "auto", color: "grey", opacity: '0.5' }} />
|
||||
|
|
@ -224,7 +230,6 @@ function Editor(props: Editor) {
|
|||
</EditorContentWrapper>
|
||||
</motion.div>
|
||||
</CourseProvider>
|
||||
</OrgProvider>
|
||||
</Page>
|
||||
);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import Editor from "./Editor";
|
|||
import { updateActivity } from "@services/courses/activities";
|
||||
import { toast } from "react-hot-toast";
|
||||
import Toast from "@components/StyledElements/Toast/Toast";
|
||||
import { OrgProvider } from "@components/Contexts/OrgContext";
|
||||
|
||||
interface EditorWrapperProps {
|
||||
content: string;
|
||||
|
|
@ -26,7 +27,7 @@ function EditorWrapper(props: EditorWrapperProps): JSX.Element {
|
|||
// setProviderState(provider);
|
||||
setIsLoading(false);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -50,8 +51,9 @@ function EditorWrapper(props: EditorWrapperProps): JSX.Element {
|
|||
} else {
|
||||
return <>
|
||||
<Toast></Toast>
|
||||
<Editor org={props.org} course={props.course} activity={props.activity} content={props.content} setContent={setContent} provider={providerState} ydoc={ydocState}></Editor>;
|
||||
|
||||
<OrgProvider orgslug={props.org.slug}>
|
||||
<Editor org={props.org} course={props.course} activity={props.activity} content={props.content} setContent={setContent} provider={providerState} ydoc={ydocState}></Editor>;
|
||||
</OrgProvider>
|
||||
</>
|
||||
}
|
||||
}
|
||||
|
|
|
|||