Commit
Merge branch 'master' into interaction-cooldown
jalsol authored May 14, 2023
2 parents fe579b8 + 221d4e9 commit 1b2ce46
Showing 3 changed files with 101 additions and 35 deletions.
2 changes: 1 addition & 1 deletion judge/jinja2/reference.py
@@ -162,7 +162,7 @@ def link_user(user):

return mark_safe(f'<span class="{profile.css_class}">'
f'<a href="{escape(reverse("user_page", args=[user.username]))}"'
- f' style="display: inline-flex; align-items: center;">'
+ f' style="display: inline-block; align-items: center;">'
f'{escape(profile.display_name)}</a>{display_badge_img}</span>')


132 changes: 99 additions & 33 deletions judge/management/commands/import_polygon_package.py
@@ -250,50 +250,114 @@ def parse_tests(problem_meta, root, package):
print(f'Time limit: {problem_meta["time_limit"]}s')
print(f'Memory limit: {problem_meta["memory_limit"] // 1024}MB')

- problem_meta['cases'] = []
+ problem_meta['cases_data'] = []
problem_meta['batches'] = {}
+ problem_meta['normal_cases'] = []
problem_meta['zipfile'] = os.path.join(problem_meta['tmp_dir'].name, 'tests.zip')
- tests_zip = zipfile.ZipFile(problem_meta['zipfile'], 'w')

- input_path_pattern = testset.find('input-path-pattern').text
- answer_path_pattern = testset.find('answer-path-pattern').text
- total_points = 0
+ # Tests can be aggregated into batches (called groups in Polygon).
+ # Each batch can have one of two point policies:
+ # - complete-group: the contestant gets points only if all tests in the batch are solved.
+ # - each-test: the contestant gets points for each test solved.
+ # Our judge only supports complete-group batches.
+ # For each-test batches, their tests are added as normal tests.
+ # Each batch can also have a list of dependencies: other batches
+ # that must be fully solved before this batch is run.
+ # To support dependencies, we simply add all dependent tests before the actual tests.
+ # (There is a more elegant way to do this via the `dependencies` field in init.yml,
+ # but the site does not support it yet.)
+ # Our judge caches the result of each test, so the same test will not be run twice.
+ # In addition, we only support dependencies for complete-group batches.
+ # (Technically, we could support dependencies for each-test batches by splitting them
+ # into multiple complete-group batches, but that's too complicated.)

groups = testset.find('groups')
if groups is not None:
for group in groups.getchildren():
+ name = group.get('name')
+ points = int(float(group.get('points', 0)))
points_policy = group.get('points-policy')
- if points_policy == 'complete-group':
- points = int(float(group.get('points', 0)))
- problem_meta['batches'][group.get('name')] = {'points': points, 'cases': []}
- total_points += points

- for i, test in enumerate(testset.find('tests').getchildren(), start=1):
- input_path = input_path_pattern % i
- answer_path = answer_path_pattern % i
- points = int(float(test.get('points', 0)))
- total_points += points

- tests_zip.writestr(f'{i:02d}.inp', package.read(input_path))
- tests_zip.writestr(f'{i:02d}.out', package.read(answer_path))

- group = test.get('group', '')
- if group in problem_meta['batches']:
- problem_meta['batches'][group]['cases'].append({
- 'input_file': f'{i:02d}.inp',
- 'output_file': f'{i:02d}.out',
- })
- else:
- problem_meta['cases'].append({
- 'input_file': f'{i:02d}.inp',
- 'output_file': f'{i:02d}.out',
+ dependencies = group.find('dependencies')
+ if dependencies is None:
+ dependencies = []
+ else:
+ dependencies = [d.get('group') for d in dependencies.getchildren()]

+ assert points_policy in ['complete-group', 'each-test']
+ if points_policy == 'each-test' and len(dependencies) > 0:
+ raise CommandError('dependencies are only supported for batches with complete-group point policy')

+ problem_meta['batches'][name] = {
+ 'name': name,
+ 'points': points,
+ 'points_policy': points_policy,
+ 'dependencies': dependencies,
+ 'cases': [],
+ }

+ with zipfile.ZipFile(problem_meta['zipfile'], 'w') as tests_zip:
+ input_path_pattern = testset.find('input-path-pattern').text
+ answer_path_pattern = testset.find('answer-path-pattern').text
+ for i, test in enumerate(testset.find('tests').getchildren()):
+ points = int(float(test.get('points', 0)))
+ input_path = input_path_pattern % (i + 1)
+ answer_path = answer_path_pattern % (i + 1)
+ input_file = f'{(i + 1):02d}.inp'
+ output_file = f'{(i + 1):02d}.out'

+ tests_zip.writestr(input_file, package.read(input_path))
+ tests_zip.writestr(output_file, package.read(answer_path))

+ problem_meta['cases_data'].append({
+ 'index': i,
+ 'input_file': input_file,
+ 'output_file': output_file,
+ 'points': points,
+ })

- tests_zip.close()
+ group = test.get('group', '')
+ if group in problem_meta['batches']:
+ problem_meta['batches'][group]['cases'].append(i)
+ else:
+ problem_meta['normal_cases'].append(i)

+ def get_tests_by_batch(name):
+ batch = problem_meta['batches'][name]

+ if len(batch['dependencies']) == 0:
+ return batch['cases']

+ # Polygon guarantees no cycles
+ cases = set(batch['cases'])
+ for dependency in batch['dependencies']:
+ cases.update(get_tests_by_batch(dependency))

+ batch['dependencies'] = []
+ batch['cases'] = list(cases)
+ return batch['cases']

+ each_test_batches = []
+ for batch in problem_meta['batches'].values():
+ if batch['points_policy'] == 'each-test':
+ each_test_batches.append(batch['name'])
+ problem_meta['normal_cases'] += batch['cases']
+ continue

+ batch['cases'] = get_tests_by_batch(batch['name'])

+ for batch in each_test_batches:
+ del problem_meta['batches'][batch]

+ # Sort tests by index
+ problem_meta['normal_cases'].sort()
+ for batch in problem_meta['batches'].values():
+ batch['cases'].sort()

- print(f'Found {len(testset.find("tests").getchildren())} testcases!')
+ print(f'Found {len(testset.find("tests").getchildren())} tests!')
+ print(f'Parsed as {len(problem_meta["batches"])} batches and {len(problem_meta["normal_cases"])} normal tests!')

+ total_points = (sum(b['points'] for b in problem_meta['batches'].values()) +
+ sum(problem_meta['cases_data'][i]['points'] for i in problem_meta['normal_cases']))
if total_points == 0:
print('Total points is zero. Set partial to False')
problem_meta['partial'] = False
@@ -538,8 +602,9 @@ def create_problem(problem_meta):
start_batch = ProblemTestCase(dataset=problem, order=order, type='S', points=batch['points'], is_pretest=False)
start_batch.save()

- for case_data in batch['cases']:
+ for case_index in batch['cases']:
order += 1
+ case_data = problem_meta['cases_data'][case_index]
case = ProblemTestCase(
dataset=problem,
order=order,
@@ -554,8 +619,9 @@
end_batch = ProblemTestCase(dataset=problem, order=order, type='E', is_pretest=False)
end_batch.save()

- for case_data in problem_meta['cases']:
+ for case_index in problem_meta['normal_cases']:
order += 1
+ case_data = problem_meta['cases_data'][case_index]
case = ProblemTestCase(
dataset=problem,
order=order,
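For reference, the following standalone sketch (not part of the commit) illustrates the dependency handling that the new parse_tests code introduces: a complete-group batch recursively absorbs the test indices of every batch it depends on, while each-test batches are dissolved into plain normal cases. The batch names, points, and test indices below are invented purely for illustration.

# Standalone illustration of the new batch handling (hypothetical data).
batches = {
    'samples': {'points': 0, 'points_policy': 'complete-group', 'dependencies': [], 'cases': [0, 1]},
    'small': {'points': 30, 'points_policy': 'complete-group', 'dependencies': ['samples'], 'cases': [2, 3]},
    'large': {'points': 70, 'points_policy': 'complete-group', 'dependencies': ['small'], 'cases': [4, 5]},
    'bonus': {'points': 0, 'points_policy': 'each-test', 'dependencies': [], 'cases': [6]},
}
normal_cases = []


def get_tests_by_batch(name):
    # Return the batch's own cases plus the cases of all (transitive) dependencies.
    batch = batches[name]
    if not batch['dependencies']:
        return batch['cases']

    cases = set(batch['cases'])
    for dependency in batch['dependencies']:
        cases.update(get_tests_by_batch(dependency))

    # Memoize: the batch now lists the expanded cases directly.
    batch['dependencies'] = []
    batch['cases'] = sorted(cases)
    return batch['cases']


# Each-test batches are dropped from the batch list; their cases become normal cases.
for name in [n for n, b in batches.items() if b['points_policy'] == 'each-test']:
    normal_cases += batches.pop(name)['cases']
for name in batches:
    batches[name]['cases'] = get_tests_by_batch(name)

print(batches['large']['cases'])  # [0, 1, 2, 3, 4, 5] -- pulls in 'small' and 'samples'
print(normal_cases)               # [6]

Because the judge caches per-test results, repeating dependent tests inside each complete-group batch costs no extra judging time, which is why the commit flattens dependencies this way.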
2 changes: 1 addition & 1 deletion resources/ranks.scss
@@ -138,7 +138,7 @@ svg.rate-box {
}

.rate-legendary-grandmaster a::first-letter {
- color: $color_primary0;
+ color: $color_primary100;
}

.rate-group {
