diff --git a/neat/checkpoint.py b/neat/checkpoint.py
index d2899e9a..f587ca06 100644
--- a/neat/checkpoint.py
+++ b/neat/checkpoint.py
@@ -15,7 +15,7 @@ class Checkpointer(BaseReporter):
     to save and restore populations (and other aspects of the simulation state).
     """
 
-    def __init__(self, generation_interval=100, time_interval_seconds=300,
+    def __init__(self, generation_interval, time_interval_seconds=None,
                  filename_prefix='neat-checkpoint-'):
         """
         Saves the current state (at the end of a generation)
         every ``generation_interval`` generations or
@@ -66,9 +66,11 @@ def save_checkpoint(self, config, population, species_set, generation):
             pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
 
     @staticmethod
-    def restore_checkpoint(filename):
+    def restore_checkpoint(filename, new_config=None):
         """Resumes the simulation from a previous saved point."""
         with gzip.open(filename) as f:
             generation, config, population, species_set, rndstate = pickle.load(f)
             random.setstate(rndstate)
+            if new_config is not None:
+                config = new_config
             return Population(config, (population, species_set, generation))
diff --git a/neat/genome.py b/neat/genome.py
index 2d652650..d096da84 100644
--- a/neat/genome.py
+++ b/neat/genome.py
@@ -341,7 +341,7 @@ def mutate_add_connection(self, config):
         possible_outputs = list(self.nodes)
         out_node = choice(possible_outputs)
 
-        possible_inputs = possible_outputs + config.input_keys
+        possible_inputs = list((set(self.nodes) - set(config.output_keys)) | set(config.input_keys))
         in_node = choice(possible_inputs)
 
         # Don't duplicate connections.
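Usage note for the `Checkpointer` change above: `generation_interval` is now a required argument, and `restore_checkpoint` accepts an optional `new_config` that replaces the pickled config at resume time (useful when hyperparameters were edited between runs). A minimal sketch; the config path and checkpoint filename are placeholders:

```python
import neat

# Placeholder config path; any valid neat-python config file works here.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-feedforward')

# generation_interval no longer has a default and must be given explicitly.
checkpointer = neat.Checkpointer(generation_interval=10)

# Resume from a saved generation, overriding the pickled config with the
# (possibly edited) one loaded above.
population = neat.Checkpointer.restore_checkpoint('neat-checkpoint-42',
                                                  new_config=config)
population.add_reporter(checkpointer)
```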
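The one-line `mutate_add_connection` change above narrows the candidate sources for a new connection: hidden and input nodes remain eligible, but output nodes no longer do. A toy illustration of the new set arithmetic, with key conventions assumed from neat-python (negative keys for inputs, small non-negative keys for outputs):

```python
# Assumed toy key sets; in neat-python, self.nodes holds the output and
# hidden node keys, while input keys are negative and live in the config.
node_keys = {0, 1, 2}                  # output node 0 plus hidden nodes 1, 2
input_keys, output_keys = [-1, -2], [0]

# Old behaviour: any node, including outputs, could source a new connection.
old_inputs = list(node_keys) + input_keys            # 0 is a candidate

# New behaviour: output nodes are removed from the candidate sources.
new_inputs = list((node_keys - set(output_keys)) | set(input_keys))
assert 0 not in new_inputs and set(new_inputs) == {1, 2, -1, -2}
```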
diff --git a/neat/graphs.py b/neat/graphs.py
index 0f3c6fc2..da0e05a5 100644
--- a/neat/graphs.py
+++ b/neat/graphs.py
@@ -1,5 +1,5 @@
 """Directed graph algorithm implementations."""
-
+from collections import defaultdict, deque
 
 def creates_cycle(connections, test):
     """
@@ -38,20 +38,35 @@ def required_for_output(inputs, outputs, connections):
     """
     assert not set(inputs).intersection(outputs)
 
+    # Create a graph representation of the connections.
+    graph = defaultdict(list)
+    reverse_graph = defaultdict(list)
+    for a, b in connections:
+        graph[a].append(b)
+        reverse_graph[b].append(a)
+
+    # Perform a breadth-first search (BFS) from the inputs to find all reachable nodes.
+    reachable = set(inputs)
+    queue = deque(inputs)
+
+    while queue:
+        node = queue.popleft()
+        for neighbor in graph[node]:
+            if neighbor not in reachable:
+                reachable.add(neighbor)
+                queue.append(neighbor)
+
+    # Now traverse backwards from the outputs, keeping only nodes reachable from the inputs.
     required = set(outputs)
     s = set(outputs)
-    while 1:
-        # Find nodes not in s whose output is consumed by a node in s.
-        t = set(a for (a, b) in connections if b in s and a not in s)
+    while True:
+        # Find nodes not in s whose output is consumed by a node in s and which are reachable from the inputs.
+        t = set(a for (a, b) in connections if b in s and a not in s and a in reachable)
         if not t:
             break
 
-        layer_nodes = set(x for x in t if x not in inputs)
-        if not layer_nodes:
-            break
-
-        required = required.union(layer_nodes)
+        required = required.union(t)
         s = s.union(t)
 
     return required
@@ -63,7 +78,6 @@ def feed_forward_layers(inputs, outputs, connections):
 
     :param inputs: list of the network input nodes
     :param outputs: list of the output node identifiers
     :param connections: list of (input, output) connections in the network.
-
     Returns a list of layers, with each layer consisting of a set of node identifiers.
     Note that the returned layers do not contain nodes whose output
     is ultimately never used to compute the final network output.
@@ -72,21 +86,24 @@ def feed_forward_layers(inputs, outputs, connections):
     required = required_for_output(inputs, outputs, connections)
 
     layers = []
-    s = set(inputs)
-    while 1:
+    potential_input = set(inputs)
+    while True:
         # Find candidate nodes c for the next layer.  These nodes should connect
         # a node in s to a node not in s.
-        c = set(b for (a, b) in connections if a in s and b not in s)
+        c = set(b for (a, b) in connections if a in potential_input and b not in potential_input)
         # Keep only the used nodes whose entire input set is contained in s.
-        t = set()
+        next_layer = set()
         for n in c:
-            if n in required and all(a in s for (a, b) in connections if b == n):
-                t.add(n)
+            # Select the connections (a, b) where b == n and a is required.
+            connections_to_n = [(a, b) for (a, b) in connections if b == n and a in required]
+            if n in required and all(a in potential_input for (a, b) in connections_to_n):
+                next_layer.add(n)
 
-        if not t:
+        if not next_layer:
             break
 
-        layers.append(t)
-        s = s.union(t)
+        layers.append(next_layer)
+        potential_input = potential_input.union(next_layer)
+
+    return layers, required
 
-    return layers
diff --git a/neat/nn/feed_forward.py b/neat/nn/feed_forward.py
index df8fc967..adb61bff 100644
--- a/neat/nn/feed_forward.py
+++ b/neat/nn/feed_forward.py
@@ -1,5 +1,5 @@
 from neat.graphs import feed_forward_layers
-
+import random
 
 class FeedForwardNetwork(object):
     def __init__(self, inputs, outputs, node_evals):
@@ -25,21 +25,25 @@ def activate(self, inputs):
         return [self.values[i] for i in self.output_nodes]
 
     @staticmethod
-    def create(genome, config):
+    def create(genome, config, unique_value=None, random_values=False):
         """ Receives a genome and returns its phenotype (a FeedForwardNetwork). """
 
         # Gather expressed connections.
        connections = [cg.key for cg in genome.connections.values() if cg.enabled]
 
-        layers = feed_forward_layers(config.genome_config.input_keys, config.genome_config.output_keys, connections)
+        layers, required = feed_forward_layers(config.genome_config.input_keys, config.genome_config.output_keys, connections)
         node_evals = []
         for layer in layers:
             for node in layer:
                 inputs = []
                 for conn_key in connections:
                     inode, onode = conn_key
-                    if onode == node:
+                    if onode == node and inode in required:
                         cg = genome.connections[conn_key]
+                        if random_values:
+                            cg.weight = random.uniform(-1.0, 1.0)
+                        if unique_value is not None:
+                            cg.weight = unique_value
                         inputs.append((inode, cg.weight))
 
                 ng = genome.nodes[node]
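Behavioural note on the `graphs.py` rewrite above: `required_for_output` now prunes nodes that feed an output but are unreachable from any input (a situation mutation can produce), and, unlike before, the returned set includes the input nodes; `feed_forward_layers` correspondingly returns the `(layers, required)` pair. (The `reverse_graph` built in the patch is currently unused.) A small worked example using the patched functions:

```python
from neat.graphs import required_for_output, feed_forward_layers

inputs, outputs = [-1], [0]
# Node 2 feeds the output but receives no signal from any input,
# so the reachability pass excludes it from the required set.
connections = [(-1, 1), (1, 0), (2, 0)]

required = required_for_output(inputs, outputs, connections)
assert required == {-1, 0, 1}     # input -1 is now included; node 2 is pruned

layers, required = feed_forward_layers(inputs, outputs, connections)
assert layers == [{1}, {0}]       # the dangling node 2 never enters a layer
```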
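The two new flags on `FeedForwardNetwork.create` above are ablation helpers: `random_values` re-samples every expressed weight uniformly from [-1, 1], and `unique_value` pins all weights to one constant; note that both overwrite `cg.weight` on the genome itself, not only on the phenotype. A sketch assuming a `genome`/`config` pair from an existing run:

```python
from neat.nn import FeedForwardNetwork

# Standard phenotype built from the genome's evolved weights.
net = FeedForwardNetwork.create(genome, config)

# Same topology, weights re-rolled from U(-1, 1) -- a quick test of how
# much fitness depends on topology rather than on the learned weights.
net_random = FeedForwardNetwork.create(genome, config, random_values=True)

# Same topology with every weight forced to a single constant.
net_const = FeedForwardNetwork.create(genome, config, unique_value=1.0)
```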
""" self.eval_function = eval_function self.timeout = timeout - self.pool = Pool(processes=num_workers, maxtasksperchild=maxtasksperchild) + self.pool = Pool(processes=num_workers, maxtasksperchild=maxtasksperchild, initializer=initializer, initargs=initargs) def __del__(self): self.pool.close() @@ -26,5 +26,5 @@ def evaluate(self, genomes, config): jobs.append(self.pool.apply_async(self.eval_function, (genome, config))) # assign the fitness back to each genome - for job, (ignored_genome_id, genome) in zip(jobs, genomes): - genome.fitness = job.get(timeout=self.timeout) + for job, (ignored_genome_id, genome) in tqdm(zip(jobs, genomes), total=len(jobs)): + genome.fitness = job.get(timeout=self.timeout) \ No newline at end of file