#79 Merge experimentation pphc-vs-rand
dvitel committed Feb 13, 2023
2 parents de4f1d6 + 9fb9322 commit 0bb7283
Showing 8 changed files with 196 additions and 100 deletions.
56 changes: 54 additions & 2 deletions .vscode/launch.json
@@ -155,7 +155,7 @@
         "run",
         "-q", "1",
         "-s", "STEP1",
-        "--algo", "evopie.pphc_quiz_model.PphcQuizModelBuilder", "--algo-params", "{ \"seed\": 595, \"pop_size\": 1, \"pareto_n\": 2, \"child_n\": 1, \"gene_size\": 3}",
+        "--algo", "evopie.pphc_quiz_model.PphcQuizModel", "--algo-params", "{ \"seed\": 595, \"pop_size\": 1, \"pareto_n\": 2, \"child_n\": 1, \"gene_size\": 3}",
         "--evo-output", "algo/pphc-1-2-1-3.json", "--archive-output", "algo/a.csv",
         "--random-seed", "17"
       ]
@@ -261,7 +261,59 @@
         "--fig-name", "<algo>-<param>-<spanned>"
       ],
       "console": "internalConsole"
-    }
+    },
+    {
+      "name": "CLI: quiz deca-experiment",
+      "type": "python",
+      "request": "launch",
+      "module": "flask",
+      "env" : {
+        "FLASK_APP" : "app.py",
+        "FLASK_ENV" : "development",
+        "FLASK_DEBUG" : "1"
+      },
+      "args" : [
+        "quiz", "deca-experiment",
+        "--deca-input", "deca-spaces/space-1_1_1-s_0-2.json",
+        "--num-runs", "2",
+        "--algo", "{ \"id\": \"pphc-1-2-1-3\", \"algo\":\"evopie.pphc_quiz_model.PphcQuizModel\", \"pop_size\": 1, \"pareto_n\": 2, \"child_n\": 1, \"gene_size\": 3}"
+      ],
+      "console": "internalConsole"
+    },
+    {
+      "name": "CLI: quiz deca-experiment RAND",
+      "type": "python",
+      "request": "launch",
+      "module": "flask",
+      "env" : {
+        "FLASK_APP" : "app.py",
+        "FLASK_ENV" : "development",
+        "FLASK_DEBUG" : "1"
+      },
+      "args" : [
+        "quiz", "deca-experiment",
+        "--deca-input", "deca-spaces/space-1_1_1-s_0-2.json",
+        "--num-runs", "2",
+        "--algo", "{ \"id\": \"rand-3\", \"algo\":\"evopie.rand_quiz_model.RandomQuizModel\", \"n\": 3, \"seed\": 313}"
+      ],
+      "console": "internalConsole"
+    },
+    {
+      "name": "CLI: quiz export",
+      "type": "python",
+      "request": "launch",
+      "module": "flask",
+      "env" : {
+        "FLASK_APP" : "app.py",
+        "FLASK_ENV" : "development",
+        "FLASK_DEBUG" : "1"
+      },
+      "args" : [
+        "quiz", "export",
+        "-q", "1"
+      ],
+      "console": "internalConsole"
+    }
   ]
 }

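The three configurations added here only wrap flask CLI invocations. As a rough sketch (not part of this commit), the pphc side of the experiment could also be driven programmatically through the APP.test_cli_runner() pattern that run_experiment in evopie/cli.py uses; the argument values below are copied from the "CLI: quiz deca-experiment" configuration above.

```python
# Sketch only: mirrors the "CLI: quiz deca-experiment" launch configuration
# via the flask test CLI runner pattern already used in evopie/cli.py.
import json
from evopie import APP

pphc_algo = {"id": "pphc-1-2-1-3", "algo": "evopie.pphc_quiz_model.PphcQuizModel",
             "pop_size": 1, "pareto_n": 2, "child_n": 1, "gene_size": 3}

runner = APP.test_cli_runner()
res = runner.invoke(args=["quiz", "deca-experiment",
                          "--deca-input", "deca-spaces/space-1_1_1-s_0-2.json",
                          "--num-runs", "2",
                          "--algo", json.dumps(pphc_algo)])
if res.exit_code != 0:  # surface the CLI output on failure, as this commit does elsewhere
    print(res.stdout)
```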
14 changes: 8 additions & 6 deletions evopie/cli.py
@@ -15,7 +15,7 @@
 from evopie import APP, deca, models
 from evopie.config import QUIZ_ATTEMPT_STEP1, QUIZ_STEP1, QUIZ_STEP2, ROLE_STUDENT
 from evopie.utils import groupby, unpack_key
-from evopie.quiz_model import get_quiz_builder, set_default_builder
+from evopie.quiz_model import get_quiz_builder, set_quiz_model
 
 def throw_on_http_fail(resp: TestResponse, status: int = 400):
     if resp.status_code >= status:
@@ -476,9 +476,9 @@ def export_student_knowledge(output):
 def simulate_quiz(quiz, instructor, password, no_algo, algo, algo_params, rnd, n_times, archive_output, evo_output, step, knowledge_selection, likes, justify_response, email_format, random_seed):
     rnd_state = np.random.RandomState(random_seed)
     if no_algo:
-        set_default_builder(None)
+        set_quiz_model(None)
     elif algo is not None:
-        set_default_builder(algo, settings = json.loads(algo_params) if algo_params is not None else {})
+        set_quiz_model(algo, settings = json.loads(algo_params) if algo_params is not None else {})
 
     def simulate_step(step):
         with APP.app_context():
@@ -584,8 +584,8 @@ def simulate_step(step):
             sys.stdout.write(f"EVO algo: {quiz_model.__class__}\nEVO settings: {model_state}\n")
             if evo_output:
                 with open(evo_output, 'w') as evo_output_json_file:
-                    evo_output_json_file.write(json.dumps({"algo": quiz_model.__class__.__name__,
-                        "quiz_model":model_state, "explored_search_space_size": explored_search_space_size,
+                    evo_output_json_file.write(json.dumps({**model_state, "algo": quiz_model.__class__.__name__,
+                        "explored_search_space_size": explored_search_space_size,
                         "search_space_size": search_space_size}, indent=4))
             sys.stdout.write(f"[{run_idx + 1}/{n_times}] Step1 quiz {quiz} finished\n{quiz_model.to_dataframe()}\n")
         if QUIZ_STEP2 in step:
@@ -607,7 +607,7 @@ def calc_deca_metrics(algo_input, deca_space, params, input_output):
         algo_results = json.loads("\n".join(f.readlines()))
     with open(deca_space, 'r') as f:
         space = deca.load_space_from_json("\n".join(f.readlines()))
-    population_distractors = algo_results["population_distractors"]
+    population_distractors = algo_results["distractors"]
     metrics_map = {"algo":algo_input,"deca": deca_space, **{p:algo_results.get(p, np.nan) for p in params},
                    **deca.dimension_coverage(space, population_distractors),
                    **deca.avg_rank_of_repr(space, population_distractors),
@@ -731,6 +731,8 @@ def init_experiment(num_questions, num_distractors, num_students, axes_number, a
 def run_experiment(deca_input, algo, algo_folder, random_seed, results_folder, num_runs):
     runner = APP.test_cli_runner()
     res = runner.invoke(args=["student", "knows", "-kr", "--deca-input", deca_input ])
+    if res.exit_code != 0:
+        print(res.stdout)
     assert res.exit_code == 0
     os.makedirs(algo_folder, exist_ok=True)
     os.makedirs(results_folder, exist_ok=True)
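The two cli.py changes above are coupled: simulate_quiz now spreads model_state into the top level of the evo-output JSON, and calc_deca_metrics reads the model's "distractors" key directly instead of the old "population_distractors" field. A sketch of the resulting shape, with placeholder values:

```python
# Hypothetical sketch of the evo-output JSON after this commit; values are placeholders.
# get_internal_model() returns {"population": ..., "distractors": ..., "settings": ...},
# and simulate_quiz now spreads that dict into the top level of the output.
import json

model_state = {"population": ["genotype-1"], "distractors": [5, 7], "settings": {"pop_size": 1}}
evo_output = {**model_state,
              "algo": "PphcQuizModel",
              "explored_search_space_size": 12,
              "search_space_size": 27}
print(json.dumps(evo_output, indent=4))

# calc_deca_metrics can therefore read the key at the top level:
population_distractors = evo_output["distractors"]  # previously algo_results["population_distractors"]
```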
70 changes: 44 additions & 26 deletions evopie/pphc_quiz_model.py
@@ -1,15 +1,13 @@
 ''' implementation of pphc quiz model '''
 from datetime import datetime
 from math import comb, prod
 import numpy as np
-from typing import Any, Optional
-from pandas import DataFrame
+from typing import Any
 
 from evopie import APP, models
 from evopie.utils import groupby
 from dataclasses import dataclass
 from numpy import unique
-from evopie.quiz_model import QuizModel, QuizModelBuilder, GeneBasedUpdateMixin
+from evopie.quiz_model import QuizModel, GeneBasedUpdateMixin
 
 @dataclass
 class CoevaluationGroup:
@@ -20,19 +18,22 @@ class CoevaluationGroup:
 
 class PphcQuizModel(QuizModel, GeneBasedUpdateMixin):
     ''' Base class for evolution process (but still of specific form)'''
+    default_settings = { "pop_size": 1, "pareto_n": 2, "child_n": 1, "gene_size": 3}
 
     def __init__(self, quiz_id: int, process: models.EvoProcess, distractors_per_question: 'dict[int, list[int]]'):
         super(PphcQuizModel, self).__init__(quiz_id, process, distractors_per_question)
-        self.gen: int = process.impl_state.get("gen", 0)
-        self.rnd = np.random.RandomState(process.impl_state.get("seed", None))
-        self.seed = int(self.rnd.get_state()[1][0])
-        self.pop_size: int = process.impl_state.get("pop_size", 1)
-        self.pareto_n: int = process.impl_state.get("pareto_n", 2)
-        self.child_n: int = process.impl_state.get("child_n", 1)
-        self.gene_size: int = process.impl_state.get("gene_size", 3)
+        settings = {**PphcQuizModel.default_settings, **process.impl_state}
+        self.gen: int = settings.get("gen", 0)
+        self.seed = settings.get("seed", None)
+        self.rnd = np.random.RandomState(self.seed)
+        self.pop_size: int = settings.get("pop_size", 1)
+        self.pareto_n: int = settings.get("pareto_n", 2)
+        self.child_n: int = settings.get("child_n", 1)
+        self.gene_size: int = settings.get("gene_size", 3)
         self.coevaluation_groups: dict[str, CoevaluationGroup] = { cgid:CoevaluationGroup(g["inds"], g["objs"], g['ppos'])
-                                                                    for cgid, g in process.impl_state.get("coevaluation_groups", {}).items()}
+                                                                    for cgid, g in settings.get("coevaluation_groups", {}).items()}
         self.evaluator_coevaluation_groups: dict[int, str] = { int(evaluator_id):str(group_id)
-                                                                for evaluator_id, group_id in process.impl_state.get("evaluator_coevaluation_groups", {}).items() }
+                                                                for evaluator_id, group_id in settings.get("evaluator_coevaluation_groups", {}).items() }
         #add new individuals to the population until it reaches pop_size
         #noop if already initialized
         while len(self.population) < self.pop_size:
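The constructor now derives its fields from class-level defaults overlaid with the persisted impl_state, instead of repeating literal defaults per field. A minimal standalone sketch of the merge semantics:

```python
# Standalone sketch: later dicts win in a {**a, **b} merge, so persisted
# impl_state values override the class defaults and extra keys pass through.
default_settings = {"pop_size": 1, "pareto_n": 2, "child_n": 1, "gene_size": 3}
impl_state = {"pareto_n": 4, "seed": 595}

settings = {**default_settings, **impl_state}
print(settings)
# {'pop_size': 1, 'pareto_n': 4, 'child_n': 1, 'gene_size': 3, 'seed': 595}
```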
@@ -49,15 +50,41 @@ def mutate_population(self):
     def compare(self, coeval_group_id: str, coeval_group: CoevaluationGroup):
         ''' Implements Pareto comparison '''
         if len(coeval_group.objs) >= self.pareto_n:
-            genotype_evaluations = {genotype_id: evals
-                                    for genotype_id, evals in self.archive.at(coeval_group.inds, coeval_group.objs)}
+            genotype_evaluations = {genotype_id: evals for genotype_id, evals in self.archive.at(coeval_group.inds, coeval_group.objs)}
+            genotype_evaluation_count = {genotype_id: num_evals for genotype_id, num_evals in self.archive.num_interactions(coeval_group.inds)}
             parent = coeval_group.inds[0]
             parent_genotype_evaluation = genotype_evaluations[parent]
+            parent_genotype_evaluation_count = genotype_evaluation_count[parent]
             # possible_winners = [ child for child in eval_group.inds[1:]
             #                       if (genotype_evaluations[child] == parent_genotype_evaluation).all() or not((genotype_evaluations[child] <= parent_genotype_evaluation).all()) ] #Pareto check
 
+            def child_is_better(parent_genotype_evaluation, parent_genotype_evaluation_count,
+                                child_genotype_evaluation, child_genotype_evaluation_count):
+                child_domination = child_genotype_evaluation >= parent_genotype_evaluation
+                parent_domination = child_genotype_evaluation <= parent_genotype_evaluation
+                if (parent_genotype_evaluation == child_genotype_evaluation).all(): #Pareto check
+                    #non-domination: same outcomes for the given students
+                    #prefer the child for diversity; other statistics would be needed to break the tie
+                    return child_genotype_evaluation_count <= parent_genotype_evaluation_count #pick the child if it has fewer evaluations
+                elif child_domination.all():
+                    return True
+                elif parent_domination.all():
+                    return False
+                else: #non-Pareto-comparable
+                    #NOTE: aggregating objectives here would reduce this to non-Pareto comparison
+                    return child_genotype_evaluation_count <= parent_genotype_evaluation_count #pick the child if it has fewer evaluations
+                # child_sum = (child_genotype_evaluation.sum(), child_domination.sum())
+                # parent_sum = (parent_genotype_evaluation.sum(), parent_domination.sum())
+                # if child_sum == parent_sum:
+                #     #prefer child for diversity but better to check other stats
+                #     return True
+                # elif child_sum > parent_sum:
+                #     return True
+                # else: #child_sum < parent_sum
+                #     return False
             possible_winners = [ child for child in coeval_group.inds[1:]
-                                    if not((genotype_evaluations[child] == parent_genotype_evaluation).all()) and (genotype_evaluations[child] >= parent_genotype_evaluation).all() ] #Pareto check
+                                    if child_is_better(parent_genotype_evaluation, parent_genotype_evaluation_count,
+                                                       genotype_evaluations[child], genotype_evaluation_count[child]) ]
 
             # possible_winners = [ child for child in eval_group.inds[1:]
             #                       if (genotype_evaluations[child] >= parent_genotype_evaluation).all() ] #Pareto check
@@ -172,13 +199,4 @@ def get_internal_model(self):
                     "gene_size": self.gene_size, "pareto_n": self.pareto_n, "child_n": self.child_n,
                     "coevaluation_groups": {id: {"inds": g.inds, "objs": g.objs, "ppos": g.ppos} for id, g in self.coevaluation_groups.items()},
                     "evaluator_coevaluation_groups": self.evaluator_coevaluation_groups}
-        return {"population": population, "distractors": distractors, "settings": settings}
-
-class PphcQuizModelBuilder(QuizModelBuilder):
-    def __init__(self, **kwargs) -> None:
-        self.default_settings = { "pop_size": 1, "pareto_n": 2, "child_n": 1, "gene_size": 3}
-        self.settings = {**self.default_settings, **kwargs}
-    def get_settings(self):
-        return self.settings
-    def get_quiz_model_class(self):
-        return PphcQuizModel
+        return {"population": population, "distractors": distractors, "settings": settings}
