Merge branch 'main' into feat/new-doc
shincap8 authored Aug 26, 2024
2 parents 137257b + 198f304 commit 2fe228d
Showing 22 changed files with 636 additions and 39 deletions.
8 changes: 8 additions & 0 deletions backend/app/api/endpoints/base/context.py
@@ -13,6 +13,7 @@
GetContextRequest,
GetFilterContext,
GetGenerativeContextRequest,
GetRandomContext,
)
from app.domain.services.base.context import ContextService

@@ -84,3 +85,10 @@ def save_contexts_to_s3(
return ContextService().save_contexts_to_s3(
file, task_id, language, country, description, category, concept
)


@router.post("/get_random_context_from_key_value")
def get_random_context_from_key_value(model: GetRandomContext):
return ContextService().get_random_context_from_key_value(
model.key_name, model.key_value
)
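
For reference, a minimal sketch of how a client might call the new endpoint. The "/context" router prefix and the local host/port are assumptions not shown in this diff; the field names come from the GetRandomContext schema below.

# Hypothetical client call; prefix, host and the key/value pair are assumptions.
import requests

payload = {"key_name": "category", "key_value": "fracture"}  # GetRandomContext fields
response = requests.post(
    "http://localhost:8000/context/get_random_context_from_key_value",
    json=payload,
)
# Returns one randomly chosen context whose context_json contains "category":"fracture",
# or null if no context matches.
print(response.json())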
37 changes: 28 additions & 9 deletions backend/app/api/endpoints/base/example.py
@@ -2,6 +2,8 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import json

from fastapi import APIRouter, Response

from app.domain.schemas.base.example import (
@@ -69,15 +71,32 @@ def partial_creation_generative_example(
def create_example(
model: CreateExampleRequest,
):
return ExampleService().create_example(
model.context_id,
model.user_id,
model.model_wrong,
model.model_endpoint_name,
model.input_json,
model.output_json,
model.metadata,
model.tag,
return (
ExampleService().create_example_and_increment_counters(
model.context_id,
model.user_id,
model.model_wrong,
model.model_endpoint_name,
json.dumps(model.input_json),
json.dumps(model.output_json),
json.dumps(model.metadata),
model.tag,
model.round_id,
model.task_id,
text=model.text,
)
if model.increment_context
else ExampleService().create_example(
model.context_id,
model.user_id,
model.model_wrong,
model.model_endpoint_name,
model.input_json,
model.output_json,
model.metadata,
model.tag,
model.text,
)
)


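As a rough illustration (not part of the commit), the new optional request fields let a caller opt into the counter-incrementing path. A hedged sketch of a payload that would take that branch; all values are made up:

# Hypothetical request body; names mirror CreateExampleRequest, values are illustrative.
payload = {
    "context_id": 1,
    "user_id": 42,
    "model_wrong": True,
    "model_endpoint_name": "example-endpoint",   # assumption: any registered endpoint name
    "input_json": {"prompt": "..."},
    "output_json": {"answer": "..."},
    "metadata": {},
    "tag": "generative",
    "increment_context": True,    # new field: routes to create_example_and_increment_counters
    "text": "free-text answer",   # new field: stored in the new examples.text column
    "task_id": 7,                 # new field: passed to the counter-incrementing path
    "round_id": 3,                # new field: passed to the counter-incrementing path
}
# With increment_context=True the endpoint json.dumps()-serializes input_json,
# output_json and metadata before calling the service; with False it passes the
# dicts through to create_example unchanged.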
5 changes: 5 additions & 0 deletions backend/app/domain/schemas/base/context.py
@@ -21,3 +21,8 @@ class GetGenerativeContextRequest(BaseModel):
class GetFilterContext(BaseModel):
real_round_id: int
filters: dict


class GetRandomContext(BaseModel):
key_name: str
key_value: str
4 changes: 4 additions & 0 deletions backend/app/domain/schemas/base/example.py
@@ -35,6 +35,10 @@ class CreateExampleRequest(BaseModel):
output_json: Optional[dict] = None
metadata: Optional[dict] = None
tag: Optional[str] = "generative"
increment_context: Optional[bool] = False
text: Optional[str] = None
task_id: Optional[int] = None
round_id: Optional[int] = None


class PartialCreationExampleRequest(BaseModel):
18 changes: 18 additions & 0 deletions backend/app/domain/services/base/context.py
@@ -306,3 +306,21 @@ def save_contexts_to_s3(
file.file.seek(0)
self.s3.put_object(Bucket=self.dataperf_bucket, Key=key, Body=file.file)
return key

def get_random_context_from_key_value(self, key_name: str, key_value: dict) -> dict:
search_txt = f'%{key_name}":"{key_value}%'
contexts = self.context_repository.get_context_by_key_value_in_contextjson(
search_txt
)
if not contexts:
return None
contexts = [
{
"id": context.id,
"round_id": context.r_realid,
**json.loads(context.context_json),
}
for context in contexts
]

return random.choice(contexts)
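
A small sketch of why the LIKE pattern works, assuming context_json is stored as compact JSON (no space after the colon); the key/value pair is hypothetical:

# Illustration only: how the search pattern lines up with a stored context_json.
import json

key_name, key_value = "category", "fracture"                  # hypothetical pair
stored = json.dumps({"category": "fracture", "id": 9}, separators=(",", ":"))
# stored == '{"category":"fracture","id":9}'

search_txt = f'%{key_name}":"{key_value}%'                    # same pattern the service builds
# SQL LIKE with this pattern matches any row whose context_json contains the
# substring 'category":"fracture'; the equivalent check in Python:
print(search_txt.strip("%") in stored)                         # True

With the default json.dumps spacing ('": "') that substring would not appear, so the helper implicitly assumes contexts were serialized without spaces; that is an inference from the pattern, not something this diff states.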
5 changes: 5 additions & 0 deletions backend/app/domain/services/base/example.py
@@ -54,6 +54,7 @@ def create_example(
output_json: Json,
metadata: Json,
tag: str,
text: str = None,
) -> dict:
return self.example_repository.create_example(
context_id,
@@ -64,6 +65,7 @@
output_json,
metadata,
tag,
text,
)

def increment_counter_examples_submitted(
@@ -112,6 +114,7 @@ def create_example_and_increment_counters(
amount_necessary_examples: int = -1,
url_external_provider: str = None,
provider_artifacts: dict = None,
text: str = None,
) -> dict:
new_sample_info = self.create_example(
context_id,
@@ -122,6 +125,7 @@
output_json,
metadata,
tag,
text,
)
self.increment_counter_examples_submitted(
round_id, user_id, context_id, task_id, model_wrong
@@ -222,6 +226,7 @@ def get_validate_configuration(self, task_id: int) -> dict:
context_info = {
"validation_user_input": config_yaml.get("validation_user_input"),
"validation_context": config_yaml.get("validation_context"),
"validation_options": config_yaml.get("validation_options"),
}
return context_info

5 changes: 4 additions & 1 deletion backend/app/infrastructure/models/models.py
@@ -439,6 +439,7 @@ class Example(Base):
cid = Column(ForeignKey("contexts.id"), nullable=False, index=True)
uid = Column(ForeignKey("users.id"), index=True)
tag = Column(Text)
text = Column(Text)
input_json = Column(Text)
output_json = Column(Text)
metadata_json = Column(Text)
@@ -465,7 +466,9 @@ class Validation(Base):
id = Column(Integer, primary_key=True)
uid = Column(ForeignKey("users.id"), index=True)
eid = Column(ForeignKey("examples.id"), nullable=False, index=True)
label = Column(Enum("flagged", "correct", "incorrect", "placeholder"))
label = Column(
Enum("flagged", "correct", "incorrect", "placeholder", "safe", "unsafe")
)
mode = Column(Enum("user", "owner"))
metadata_json = Column(Text)

7 changes: 7 additions & 0 deletions backend/app/infrastructure/repositories/context.py
@@ -74,3 +74,10 @@ def get_context_by_real_round_id(self, real_round_id: int):
.filter(self.model.r_realid == real_round_id)
.all()
)

def get_context_by_key_value_in_contextjson(self, search_txt: str):
return (
self.session.query(self.model)
.filter(self.model.context_json.like(search_txt))
.all()
)
2 changes: 2 additions & 0 deletions backend/app/infrastructure/repositories/example.py
@@ -27,6 +27,7 @@ def create_example(
output_json: Json,
metadata: Json,
tag: str,
text: str,
) -> dict:
return self.add(
{
@@ -42,6 +43,7 @@
"split": "undecided",
"flagged": 0,
"total_verified": 0,
"text": text,
}
)

@@ -34,6 +34,7 @@ const AnnotationContextStrategy: FC<Props & ContextAnnotationFactoryType> = ({
hidden,
setIsGenerativeContext,
setPartialSampleId,
userId,
}) => {
const [goalRender, setGoalRender] =
useState<ReactElement<ContextConfigType & ContextAnnotationFactoryType>>();
@@ -51,6 +52,7 @@ const AnnotationContextStrategy: FC<Props & ContextAnnotationFactoryType> = ({
hidden,
setIsGenerativeContext,
setPartialSampleId,
userId,
...config,
}}
/>,
@@ -114,7 +114,7 @@ const ChatWithInstructions: FC<
if (redirectUrl) {
Swal.fire({
title: "You have reached the necessary examples",
text: "You will be redirected to the third party provider",
text: "You will be redirected to the post-survey.",
icon: "success",
confirmButtonText: "Ok",
}).then(() => {
@@ -244,11 +244,11 @@
about how best to respond:
<br />
<br />
1) What should you do next? (e.g. stay home or call 999)
1) What healthcare service do you need? (e.g. A&E or routine GP follow-up)
<br />
<br />
2) Why did you make the choice you did? Please name any
specific medical conditions relevant to your decision.
2) Why did you make the choice you did? Please name all of the
specific medical conditions you consider relevant to your decision. (e.g. suspected broken bone)
<br />
<br />
The scenario (available below and on the next page)
@@ -270,7 +270,8 @@
and how well it works for you. Therefore, it is
essential that you{" "}<strong>only use your own
words,</strong> and do not copy and paste from the
scenario text, or from any other source.
scenario text, or from any other source. Please do not
use additional external sources.
</p>
</>
) : (
@@ -324,7 +325,7 @@
</div>
)}
<div className="px-4 py-2 border border-gray-200 ">
<h3 className="text-2xl font-bold">Scenario</h3>
<h3 className="text-2xl font-bold"><u>Scenario</u></h3>
<BasicInstructions instructions={context} />
</div>
<div className="flex items-end justify-end gap-4">
@@ -368,7 +369,7 @@
will need to answer two questions:
</p>
<ol>
<li> 1) What should you do next?</li>
<li> 1) What healthcare service do you need?</li>
<li> 2) Why did you make the choice you did? </li>
</ol>
Use any methods you ordinarily use at home (e.g.
@@ -377,8 +378,9 @@
<br />
<p style={{ color: "MediumSeaGreen" }}>
Keep track of the methods you are using in the
textbox below. Once you click “Submit” the scenario
questions will appear.
textbox below. The questions will appear at the
bottom of the page after you have finished putting
in your approach.
</p>
</div>
<textarea
@@ -28,7 +28,8 @@ const SelectBetweenImagesGenerative: FC<
const [promptHistory, setPromptHistory] = useState<any[]>([]);
const [showQueue, setShowQueue] = useState<boolean>(false);
const [positionQueue, setPositionQueue] = useState<any>({});
const [firstMessageReceived, setFirstMessageReceived] = useState<boolean>(false);
const [firstMessageReceived, setFirstMessageReceived] =
useState<boolean>(false);
const [allowsGeneration, setAllowsGeneration] = useState(true);
const [showLoader, setShowLoader] = useState(false);
const [showImages, setShowImages] = useState<any[]>([]);
@@ -96,11 +97,11 @@

const handlePopUp = () => {
Swal.fire({
title: "Example already submitted",
text: "You selected a prompt from your history and we are showing the images previously generated for this prompt. Modify the prompt to get new image generation.",
title: "This prompt is already in your history!",
text: "We will show the same images previously generated for it. Modify the prompt to see different images.",
icon: "info",
});
}
};

const runCheckers = async (prompt: string) => {
const checkIfPromptExistsForUser = await post(
@@ -114,28 +115,38 @@
if (checkIfPromptExistsForUser) {
setFirstMessageReceived(true);
}
const promptWithMoreThanOneHundredSubmissions = await post(
"/historical_data/get_occurrences_with_more_than_one_hundred",
const responseHistory = await fetch(
`${process.env.REACT_APP_API_HOST_2}/historical_data/get_occurrences_with_more_than_one_hundred`,
{
task_id: taskId,
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
task_id: taskId,
}),
},
);

const promptWithMoreThanOneHundredSubmissions =
await responseHistory.json();

const checkIfPromptIsInOccurrences =
promptWithMoreThanOneHundredSubmissions.some(
(item: any) => item.data === prompt.trim(),
(item: any) => item === prompt.trim(),
);
if (checkIfPromptIsInOccurrences) {
Swal.fire({
title: "Congrats! You have found a sample prompt!",
text: "We've already found this issue so it won't contribute to your score. Now go and find a different prompt and get points!",
title: "This prompt has already been submitted by other users",
text: "It wont contribute to your score. Modify the prompt or think of a different one.",
icon: "success",
});
}
return { checkIfPromptExistsForUser, checkIfPromptIsInOccurrences };
};

const generateImages = async () => {
setFirstMessageReceived(false);
if (
neccessaryFields.every(
(item) =>
@@ -177,7 +188,7 @@
setShowQueue(true);
setPositionQueue(imagesHttp);
await saveHistoricalData(prompt, setPromptHistory);
setTimeout(generateImages, 25000);
setTimeout(generateImages, 10000);
}
} else {
Swal.fire({
@@ -226,6 +237,7 @@
};

const handlePromptHistory = async (prompt: string) => {
setFirstMessageReceived(false);
setShowLoader(true);
setArtifactsInput({
...artifactsInput,
@@ -273,7 +285,7 @@
setShowQueue(true);
setPositionQueue(imagesHttp);
await saveHistoricalData(prompt, setPromptHistory);
setTimeout(generateImages, 25000);
setTimeout(generateImages, 10000);
}
} else {
Swal.fire({
@@ -361,7 +373,7 @@

useEffect(() => {
firstMessageReceived && handlePopUp();
}, [firstMessageReceived])
}, [firstMessageReceived]);

return (
<>
@@ -379,6 +391,7 @@
placeholder="Click this dropdown to see submitted prompts"
onChange={handlePromptHistory}
disabled={!allowsGeneration}
allowSearch={instruction.dropdown_search || false}
/>
</AnnotationInstruction>
<AnnotationInstruction