Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

run test_balancing_learner for all strategies #218

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 56 additions & 1 deletion adaptive/tests/test_balancing_learner.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,20 @@
# -*- coding: utf-8 -*-

import random

import pytest

from adaptive.learner import BalancingLearner, Learner1D
from adaptive.learner import (
BalancingLearner,
Learner1D,
Learner2D,
LearnerND,
AverageLearner,
SequenceLearner,
)
from adaptive.runner import simple

from .test_learners import generate_random_parametrization, run_with

strategies = ["loss", "loss_improvements", "npoints", "cycle"]

Expand Down Expand Up @@ -65,3 +75,48 @@ def test_strategies(strategy, goal):
learners = [Learner1D(lambda x: x, bounds=(-1, 1)) for i in range(10)]
learner = BalancingLearner(learners, strategy=strategy)
simple(learner, goal=goal)


@run_with(
    Learner1D,
    Learner2D,
    LearnerND,
    AverageLearner,
    SequenceLearner,
    with_all_loss_functions=False,
)
@pytest.mark.parametrize("strategy", strategies)
def test_balancing_learner(learner_type, f, learner_kwargs, strategy):
    """Test that the BalancingLearner works with every learner type and
    every balancing strategy.

    Emulates out-of-order parallel execution: a random subset of each
    batch of requested points is stashed and only told back to the
    learner in a later iteration.
    """
    learners = [
        learner_type(generate_random_parametrization(f), **learner_kwargs)
        for _ in range(4)
    ]

    learner = BalancingLearner(learners, strategy=strategy)

    # Points that were asked for but whose evaluation is deliberately
    # delayed, to emulate workers returning results out of order.
    stash = []

    for _ in range(100):
        n = random.randint(1, 10)
        m = random.randint(0, n)
        xs, _ = learner.ask(n, tell_pending=False)

        # Save 'm' random points out of `xs` for later
        random.shuffle(xs)
        for _ in range(m):
            stash.append(xs.pop())

        for x in xs:
            learner.tell(x, learner.function(x))

        # Evaluate and add 'm' random points from `stash`
        random.shuffle(stash)
        for _ in range(m):
            x = stash.pop()
            learner.tell(x, learner.function(x))

    # Every sub-learner must have received a reasonable share of points;
    # on failure, report the actual per-learner counts.
    assert all(lrn.npoints > 10 for lrn in learner.learners), [
        lrn.npoints for lrn in learner.learners
    ]
46 changes: 1 addition & 45 deletions adaptive/tests/test_learners.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@

try:
from adaptive.learner import SKOptLearner
except ModuleNotFoundError:
except (ModuleNotFoundError, ImportError):
SKOptLearner = None


Expand Down Expand Up @@ -456,50 +456,6 @@ def test_learner_performance_is_invariant_under_scaling(
assert math.isclose(learner.loss(), control.loss(), rel_tol=1e-10)


@run_with(
    Learner1D,
    Learner2D,
    LearnerND,
    AverageLearner,
    SequenceLearner,
    with_all_loss_functions=False,
)
def test_balancing_learner(learner_type, f, learner_kwargs):
    """Test if the BalancingLearner works with the different types of learners.

    Emulates out-of-order parallel execution by stashing a random subset
    of each batch of requested points and telling them back later.
    """
    learners = [
        learner_type(generate_random_parametrization(f), **learner_kwargs)
        for _ in range(4)
    ]

    learner = BalancingLearner(learners)

    # Points asked for but deliberately evaluated later, to emulate
    # workers returning results out of order.
    stash = []

    for _ in range(100):
        n = random.randint(1, 10)
        m = random.randint(0, n)
        xs, _ = learner.ask(n, tell_pending=False)

        # Save 'm' random points out of `xs` for later
        random.shuffle(xs)
        for _ in range(m):
            stash.append(xs.pop())

        for x in xs:
            learner.tell(x, learner.function(x))

        # Evaluate and add 'm' random points from `stash`
        random.shuffle(stash)
        for _ in range(m):
            x = stash.pop()
            learner.tell(x, learner.function(x))

    # Every sub-learner must have received a reasonable share of points;
    # on failure, report the actual per-learner counts.
    assert all(lrn.npoints > 10 for lrn in learner.learners), [
        lrn.npoints for lrn in learner.learners
    ]


@run_with(
Learner1D,
Learner2D,
Expand Down