diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml new file mode 100644 index 0000000..453925c --- /dev/null +++ b/.JuliaFormatter.toml @@ -0,0 +1 @@ +style = "sciml" \ No newline at end of file diff --git a/Project.toml b/Project.toml index 89df4d0..406725d 100644 --- a/Project.toml +++ b/Project.toml @@ -1,25 +1,46 @@ name = "CompositionalNetworks" uuid = "4b67e4b5-442d-4ef5-b760-3f5df3a57537" authors = ["Jean-François Baffier"] -version = "0.5.9" +version = "0.6.0" [deps] ConstraintCommons = "e37357d9-0691-492f-a822-e5ea6a920954" ConstraintDomains = "5800fd60-8556-4464-8d61-84ebf7a0bedb" Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" +ExproniconLite = "55351af7-c7e9-48d6-89ff-24e801d99491" JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899" OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" TestItems = "1c621080-faea-4a02-84b6-bbd5e436b8fe" Unrolled = "9602ed7d-8fef-5bc8-8597-8f21381861e8" +[weakdeps] +Evolutionary = "86b6b26d-c046-49b6-aa0b-5f0f74682bd6" +# LocalSearchSolvers = "2b10edaa-728d-4283-ac71-07e312d6ccf3" +# JuMP = "4076af6c-e467-56ae-b986-b466b2749572" +# Juniper = "2ddba703-00a4-53a7-87a5-e8b9971dde84" +# Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" +# Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b" + +[extensions] +GeneticExt = "Evolutionary" +# LocalSearchSolversExt = "LocalSearchSolvers" +# JuMPExt = ["JuMP", "Juniper", "Ipopt", "Gurobi"] + [compat] -ConstraintCommons = "0.2" -ConstraintDomains = "0.3" +LocalSearchSolvers = "0.4" +Evolutionary = "0.11" +# JuMP = "1" +# Juniper = "0.9" +# Ipopt = "1" +# Gurobi = "1.7" +ConstraintCommons = "0.2, 0.3" +ConstraintDomains = "0.3, 0.4" Dictionaries = "0.4" Distances = "0.10" -JuliaFormatter = "1" +ExproniconLite = "0.10.13" +JuliaFormatter = "1, 2" OrderedCollections = "1" Random = "1" TestItems = "1" @@ -29,6 +50,11 @@ julia = "1.10" [extras] Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Evolutionary = "86b6b26d-c046-49b6-aa0b-5f0f74682bd6" +LocalSearchSolvers = "2b10edaa-728d-4283-ac71-07e312d6ccf3" +# JuMP = "4076af6c-e467-56ae-b986-b466b2749572" +# Juniper = "2ddba703-00a4-53a7-87a5-e8b9971dde84" +# Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" +# Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b" ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7" JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b" Memoization = "6fafb56a-5788-4b4e-91ca-c0cea6611c73" @@ -37,4 +63,14 @@ TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a" ThreadPools = "b189fb0b-2eb5-4ed4-bc0c-d34c51242431" [targets] -test = ["Aqua", "ExplicitImports", "JET", "Evolutionary", "Memoization", "Test", "TestItemRunner", "ThreadPools"] +test = [ + "Aqua", + "ExplicitImports", + "JET", + "Evolutionary", + # "LocalSearchSolvers", + "Memoization", + "Test", + "TestItemRunner", + "ThreadPools", +] diff --git a/ext/GeneticExt.jl b/ext/GeneticExt.jl new file mode 100644 index 0000000..b1e6c34 --- /dev/null +++ b/ext/GeneticExt.jl @@ -0,0 +1,133 @@ +module GeneticExt + +import CompositionalNetworks: + CompositionalNetworks, AbstractICN, Configurations, manhattan, + hamming +import CompositionalNetworks: GeneticOptimizer, apply!, weights_bias, regularization +import CompositionalNetworks: evaluate, solutions +import Evolutionary: Evolutionary, tournament, SPX, flip, GA + +function CompositionalNetworks.GeneticOptimizer(; + global_iter = Threads.nthreads(), + # local_iter=64, + local_iter = 400, + memoize = false, + #pop_size=64, + pop_size = 
100,
+        sampler = nothing
+)
+    return GeneticOptimizer(global_iter, local_iter, memoize, pop_size, sampler)
+end
+
+function generate_population(icn, pop_size; vect = [])
+    population = Vector{BitVector}()
+    if isempty(vect)
+        foreach(_ -> push!(population, falses(length(icn.weights))), 1:pop_size)
+    else
+        # copy `vect` so the individuals do not all alias the same vector
+        foreach(_ -> push!(population, copy(vect)), 1:pop_size)
+    end
+    return population
+end
+
+function CompositionalNetworks.optimize!(
+        icn::T,
+        configurations::Configurations,
+        # dom_size,
+        metric_function::Union{Function, Vector{Function}},
+        optimizer_config::GeneticOptimizer;
+        samples = nothing,
+        memoize = false,
+        parameters...
+) where {T <: AbstractICN}
+
+    # @info icn.weights
+
+    # inplace = zeros(dom_size, 18)
+    solution_iter = solutions(configurations)
+    non_solutions = solutions(configurations; non_solutions = true)
+    solution_vector = [i.x for i in solution_iter]
+
+    function fitness(w)
+        weights_validity = apply!(icn, w)
+
+        a = if metric_function isa Function
+            metric_function(
+                icn,
+                configurations,
+                solution_vector;
+                weights_validity = weights_validity,
+                parameters...
+            )
+        else
+            minimum(
+                met -> met(
+                    icn,
+                    configurations,
+                    solution_vector;
+                    weights_validity = weights_validity,
+                    parameters...
+                ),
+                metric_function
+            )
+        end
+
+        b = weights_bias(w)
+        c = regularization(icn)
+
+        function new_regularization(icn::AbstractICN)
+            start = 1
+            count = 0
+            total = 0
+            for (i, layer) in enumerate(icn.layers)
+                if !layer.mutex
+                    ran = start:(start + icn.weightlen[i] - 1)
+                    op = findall(icn.weights[ran])
+                    max_op = ran .- (start - 1)
+                    total += (sum(op) / sum(max_op))
+                    count += 1
+                end
+                start += icn.weightlen[i]
+            end
+            return total / count
+        end
+
+        d = sum(findall(icn.weights)) /
+            (length(icn.weights) * (length(icn.weights) + 1) / 2)
+
+        e = new_regularization(icn)
+
+        # @info "Lot of things" a b c d e
+        #=
+        println("""
+        sum: $a
+        weights bias: $b
+        regularization: $c
+        new reg: $e
+        thread: $(Threads.threadid())
+        """) =#
+
+        return a + b + c
+    end
+
+    _icn_ga = GA(;
+        populationSize = optimizer_config.pop_size,
+        crossoverRate = 0.8,
+        epsilon = 0.05,
+        selection = tournament(4),
+        crossover = SPX,
+        mutation = flip,
+        mutationRate = 1.0
+    )
+
+    pop = generate_population(icn, optimizer_config.pop_size)
+    r = Evolutionary.optimize(
+        fitness,
+        pop,
+        _icn_ga,
+        Evolutionary.Options(; iterations = optimizer_config.local_iter)
+    )
+    validity = apply!(icn, Evolutionary.minimizer(r))
+    return icn => validity
+end
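+
+# Hypothetical usage sketch (`icn` and `configs` are assumptions, not defined
+# in this file): once Evolutionary.jl is loaded, this extension activates and
+# an ICN can be trained along these lines.
+#
+#   using CompositionalNetworks, Evolutionary
+#   optimizer = GeneticOptimizer(; global_iter = 1, local_iter = 100, pop_size = 50)
+#   result = optimize!(icn, configs, hamming, optimizer; dom_size = 5)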
+
+end
diff --git a/ext/JuMPExt.jl b/ext/JuMPExt.jl
new file mode 100644
index 0000000..47eb9cb
--- /dev/null
+++ b/ext/JuMPExt.jl
@@ -0,0 +1,109 @@
+module JuMPExt
+
+using JuMP
+using Juniper
+using Ipopt
+using Gurobi
+
+# Original imports
+import CompositionalNetworks: CompositionalNetworks, AbstractICN, Configurations
+import CompositionalNetworks: JuMPOptimizer, apply!, weights_bias, regularization
+import CompositionalNetworks: evaluate, solutions
+
+function CompositionalNetworks.optimize!(
+        icn::T,
+        configurations::Configurations,
+        metric_function::Union{Function, Vector{Function}},
+        optimizer_config::JuMPOptimizer;
+        parameters...
+) where {T <: AbstractICN}
+    # Create model
+    m = Model()
+
+    # Set up MINLP solver
+    nl_solver = optimizer_with_attributes(Ipopt.Optimizer, "print_level" => 0)
+    mip_solver = optimizer_with_attributes(Gurobi.Optimizer, "OutputFlag" => 0)
+
+    set_optimizer(
+        m,
+        optimizer_with_attributes(
+            Juniper.Optimizer,
+            "nl_solver" => nl_solver,
+            "mip_solver" => mip_solver,
+            "log_levels" => []
+        )
+    )
+
+    n = length(icn.weights)
+
+    # All variables are binary
+    @variable(m, w[1:n], Bin)
+
+    # Add constraints
+    start = 1
+    for (i, layer) in enumerate(icn.layers)
+        stop = start + icn.weightlen[i] - 1
+        idx_range = start:stop
+
+        if layer.mutex
+            # Mutually exclusive constraint - at most one variable can be true
+            # Equivalent to: max(0.0, sum(w[idx_range]) - 1) = 0
+            @constraint(m, sum(w[j] for j in idx_range) <= 1)
+        else
+            # No empty layer constraint - at least one variable must be true
+            # Equivalent to: max(0, 1 - sum(w[idx_range])) = 0
+            @constraint(m, sum(w[j] for j in idx_range) >= 1)
+        end
+
+        start = stop + 1
+    end
+
+    # Solutions of the explored configurations, built as in GeneticExt
+    solution_vector = [sol.x for sol in solutions(configurations)]
+
+    # Define fitness function - keeping the original structure
+    function fitness(w_values)
+        # Convert JuMP variables to BitVector
+        w_bits = BitVector([value(w_values[i]) > 0.5 for i in 1:length(w_values)])
+
+        weights_validity = apply!(icn, w_bits)
+
+        s = if metric_function isa Function
+            metric_function(
+                icn,
+                configurations,
+                solution_vector;
+                weights_validity = weights_validity,
+                parameters...
+            )
+        else
+            minimum(
+                met -> met(
+                    icn,
+                    configurations,
+                    solution_vector;
+                    weights_validity = weights_validity,
+                    parameters...
+                ),
+                metric_function
+            )
+        end
+        return s + weights_bias(w_bits) + regularization(icn)
+    end
+
+    # NOTE: `fitness` is a black-box Julia function of the whole vector `w`;
+    # with JuMP's legacy nonlinear API it would have to be registered as a
+    # user-defined operator before it can appear in an objective, so
+    # `@NLobjective` cannot consume it directly as written. This extension is
+    # still commented out in Project.toml.
+    # Define objective using the fitness function
+    @NLobjective(m, Min, fitness(w))
+
+    # Solve model
+    optimize!(m)
+
+    # Return solution
+    if termination_status(m) in [MOI.OPTIMAL, MOI.LOCALLY_SOLVED]
+        w_sol = value.(w) .> 0.5 # Convert to BitVector
+        weights_validity = apply!(icn, BitVector(w_sol))
+        return icn => weights_validity
+    else
+        # No solution found, generate new valid weights
+        CompositionalNetworks.generate_new_valid_weights!(icn)
+        return icn => true
+    end
+end
+
+end
diff --git a/ext/LocalSearchSolversExt.jl b/ext/LocalSearchSolversExt.jl
new file mode 100644
index 0000000..76fdb3b
--- /dev/null
+++ b/ext/LocalSearchSolversExt.jl
@@ -0,0 +1,101 @@
+module LocalSearchSolversExt
+
+import CompositionalNetworks: CompositionalNetworks, AbstractICN, Configurations
+import CompositionalNetworks: LocalSearchOptimizer, apply!, weights_bias, regularization
+import CompositionalNetworks: evaluate, solutions
+import LocalSearchSolvers: model, domain, variable!, constraint!, objective!, solver, solve!
+import LocalSearchSolvers: LocalSearchSolvers, has_solution, best_values
+
+function CompositionalNetworks.LocalSearchOptimizer(;
+        options::LocalSearchSolvers.Options = LocalSearchSolvers.Options(),
+)
+    return LocalSearchOptimizer(options)
+end
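+
+# Hypothetical usage sketch (assumes LocalSearchSolvers.jl is loaded so this
+# extension activates; `icn` and `configs` come from the caller):
+#
+#   options = LocalSearchSolvers.Options()
+#   result = optimize!(icn, configs, hamming, LocalSearchOptimizer(; options))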
+
+# Constraint error for mutex layers: zero iff exactly one operation is selected.
+# (Assumption: the solver passes the boolean weights of the layer as `w`. An
+# earlier draft compared `length(w)` against the length of an integer, which
+# penalized every layer offering more than one operation.)
+function mutually_exclusive(w)
+    x = count(identity, w)
+    return iszero(x) ? 1.0 : max(0.0, x - 1.0)
+end
+
+no_empty_layer(x; X = nothing) = max(0, 1 - sum(x))
+
+parameter_specific_operations(x; X = nothing) = 0.0
+
+function CompositionalNetworks.optimize!(
+        icn::T,
+        configurations::Configurations,
+        metric_function::Union{Function, Vector{Function}},
+        optimizer_config::LocalSearchOptimizer;
+        parameters...
+) where {T <: AbstractICN}
+    @debug "starting debug opt"
+    m = model(; kind = :icn)
+    n = length(icn.weights)
+
+    # All variables are boolean
+    d = domain([false, true])
+
+    # Add variables
+    foreach(_ -> variable!(m, d), 1:n)
+
+    # Add constraint
+    start = 1
+    for (i, layer) in enumerate(icn.layers)
+        stop = start + icn.weightlen[i] - 1
+        if layer.mutex
+            f(x; X = nothing) = mutually_exclusive(x)
+            constraint!(m, f, start:stop)
+        else
+            constraint!(m, no_empty_layer, start:stop)
+        end
+        start = stop + 1
+    end
+
+    # Solutions of the explored configurations, built as in GeneticExt
+    solution_vector = [sol.x for sol in solutions(configurations)]
+
+    function fitness(w)
+        weights_validity = apply!(icn, w)
+
+        s = if metric_function isa Function
+            metric_function(
+                icn,
+                configurations,
+                solution_vector;
+                weights_validity = weights_validity,
+                parameters...
+            )
+        else
+            minimum(
+                met -> met(
+                    icn,
+                    configurations,
+                    solution_vector;
+                    weights_validity = weights_validity,
+                    parameters...
+                ),
+                metric_function
+            )
+        end
+
+        return s + weights_bias(w) + regularization(icn)
+    end
+
+    objective!(m, fitness)
+
+    # Create solver and solve
+    s = solver(m; options = optimizer_config.options)
+    solve!(s)
+    @debug "pool" s.pool best_values(s.pool) best_values(s) s.pool.configurations
+
+    # Return best values
+
+    weights_validity = if has_solution(s)
+        apply!(icn, BitVector(collect(best_values(s))))
+    else
+        CompositionalNetworks.generate_new_valid_weights!(icn)
+        true
+    end
+
+    return icn => weights_validity
+end
+
+end
diff --git a/old/composition.jl b/old/composition.jl
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/old/composition.jl
@@ -0,0 +1 @@
+
diff --git a/old/layer.jl b/old/layer.jl
new file mode 100644
index 0000000..c3b6f40
--- /dev/null
+++ b/old/layer.jl
@@ -0,0 +1,101 @@
+"""
+    Layer
+A structure to store a `LittleDict` of operations that can be selected during the learning phase of an ICN. If the layer is exclusive, only one operation can be selected at a time.
+"""
+struct Layer
+    exclusive::Bool
+    functions::LittleDict{Symbol, Function}
+    parameters::Vector{Symbol}
+end
+
+"""
+    functions(layer)
+Access the operations of a layer. The container is ordered.
+"""
+functions(layer) = layer.functions
+
+"""
+    length(layer)
+Return the number of operations in a layer.
+"""
+Base.length(layer::Layer) = length(functions(layer))
+
+"""
+    exclu(layer)
+Return `true` if the layer has mutually exclusive operations.
+"""
+exclu(layer) = layer.exclusive
+
+"""
+    symbol(layer, i)
+Return the symbol of the i-th operation in a given layer.
+"""
+symbol(layer, i) = begin
+    if i > length(layer)
+        @info layer i functions(layer)
+    end
+    collect(keys(functions(layer)))[i]
+end
+
+"""
+    nbits_exclu(layer)
+Convert the length of an exclusive layer into a number of bits.
+"""
+nbits_exclu(layer) = ceil(Int, log2(length(layer)))
+
+"""
+    show_layer(layer)
+Return a string that contains the elements in a layer.
+"""
+show_layer(layer) = layer |> functions |> keys |> string
+
+"""
+    selected_size(layer, layer_weights)
+Return the number of operations selected by `layer_weights` in `layer`.
+"""
+selected_size(layer, layer_weights) = exclu(layer) ? 1 : sum(layer_weights)
+
+"""
+    is_viable(layer, w)
+    is_viable(icn)
+    is_viable(icn, w)
+Assert if a pair of layer/icn and weights compose a viable pattern. If no weights are given with an icn, it will check the current internal value.
+"""
+is_viable(layer::Layer, w) = exclu(layer) ?
as_int(w) < length(layer) : any(w) + +""" + generate_inclusive_operations(predicate, bits) + generate_exclusive_operation(max_op_number) +Generates the operations (weights) of a layer with inclusive/exclusive operations. +""" +function generate_inclusive_operations(predicate, bits) + ind = falses(bits) + while true + ind = bitrand(bits) + predicate(ind) && break + end + return ind +end + +""" + generate_exclusive_operation(max_op_number) +Generates the operations (weights) of a layer with exclusive operations. +""" +function generate_exclusive_operation(max_op_number) + op = rand(1:max_op_number) + return as_bitvector(op, max_op_number) +end + +""" + generate_weights(layers) + generate_weights(icn) +Generate the weights of a collection of layers or of an ICN. +""" +function generate_weights(layers) + bitvecs = map( + l -> exclu(l) ? generate_exclusive_operation(length(l)) : + generate_inclusive_operations(any, length(l)), + layers + ) + return vcat(bitvecs...) +end diff --git a/src/learn.jl b/old/learn.jl similarity index 80% rename from src/learn.jl rename to old/learn.jl index a2410f7..db8434c 100644 --- a/src/learn.jl +++ b/old/learn.jl @@ -12,18 +12,19 @@ end Create an ICN, optimize it, and return its composition. """ function learn_compose( - solutions, - non_sltns, - dom_size; - metric = :hamming, - optimizer, - X_test = nothing, - parameters..., + solutions, + non_sltns, + dom_size; + metric = :hamming, + optimizer, + X_test = nothing, + parameters... ) icn = ICN(; parameters...) - _, weights = - optimize!(icn, solutions, non_sltns, dom_size, metric, optimizer; parameters...) - compositions = Dictionary{Composition,Int}() + _, + weights = optimize!( + icn, solutions, non_sltns, dom_size, metric, optimizer; parameters...) + compositions = Dictionary{Composition, Int}() for (bv, occurrences) in pairs(weights) set!(compositions, compose(deepcopy(icn), bv), occurrences) @@ -49,13 +50,13 @@ Explore a search space, learn a composition from an ICN, and compose an error fu - `action`: either `:symbols` to have a description of the composition or `:composition` to have the composed function itself """ function explore_learn_compose( - domains, - concept; - configurations = nothing, - metric = :hamming, - optimizer, - X_test = nothing, - parameters..., + domains, + concept; + configurations = nothing, + metric = :hamming, + optimizer, + X_test = nothing, + parameters... ) if isnothing(configurations) configurations = explore(domains, concept; parameters...) @@ -70,7 +71,7 @@ function explore_learn_compose( metric, optimizer, X_test, - parameters..., + parameters... ) end @@ -94,29 +95,30 @@ Explore, learn and compose a function and write it to a file. - `popSize`: size of the population in the genetic algorithm """ function compose_to_file!( - concept, - name, - path; - configurations = nothing, - domains, - language = :Julia, - metric = :hamming, - optimizer, - X_test = nothing, - parameters..., + concept, + name, + path; + configurations = nothing, + domains, + language = :Julia, + metric = :hamming, + optimizer, + X_test = nothing, + parameters... ) if isnothing(configurations) configurations = explore(domains, concept; parameters...) end - compo, icn, _ = explore_learn_compose( + compo, icn, + _ = explore_learn_compose( domains, concept; configurations, metric, optimizer, X_test, - parameters..., + parameters... 
) composition_to_file!(compo, path, name, language) return icn diff --git a/src/utils.jl b/old/utils.jl similarity index 88% rename from src/utils.jl rename to old/utils.jl index 991833e..d4fae7b 100644 --- a/src/utils.jl +++ b/old/utils.jl @@ -7,7 +7,7 @@ function map_tr!(f, x, X; p...) f, x, X; - p..., + p... ) end # function map_tr!(f, x, X) @@ -27,9 +27,10 @@ function lazy(funcs::Function...) for f in Iterators.map(Symbol, funcs) eval( :( - $f(x::V, X; params...) where {V<:AbstractVector} = - map_tr!($f, x, X; params...) - ), + function $f(x::V, X; params...) where {V <: AbstractVector} + map_tr!($f, x, X; params...) + end + ), ) eval(:($f(x; params...) = $f(x, similar(x); params...))) end @@ -44,9 +45,10 @@ function lazy_param(funcs::Function...) for f in Iterators.map(Symbol, funcs) eval( :( - $f(x::V, X; params...) where {V<:AbstractVector} = - map_tr!($f, x, X; params...) - ), + function $f(x::V, X; params...) where {V <: AbstractVector} + map_tr!($f, x, X; params...) + end + ), ) eval(:($f(x; params...) = $f(x, similar(x); params...))) end @@ -99,7 +101,7 @@ Application of an operation from the transformation layer. Used to generate more function tr_in end @unroll function tr_in(tr, X, x; params...) - @unroll for i = 1:length(tr) + @unroll for i in 1:length(tr) tr[i](x, @view(X[:, i]); params...) end end diff --git a/src/CompositionalNetworks.jl b/src/CompositionalNetworks.jl index 42b8fe9..a225c57 100644 --- a/src/CompositionalNetworks.jl +++ b/src/CompositionalNetworks.jl @@ -1,60 +1,45 @@ module CompositionalNetworks -# imports -import ConstraintCommons: incsert! -import ConstraintDomains: explore +# SECTION - Imports +import ConstraintCommons: incsert!, extract_parameters, USUAL_CONSTRAINT_PARAMETERS +import ConstraintDomains: explore, DiscreteDomain, domain_size import Dictionaries: Dictionary, set! import Distances +import ExproniconLite: JLFunction, has_symbol, codegen_ast, xtuple, sprint_expr import JuliaFormatter: SciMLStyle, format_text import OrderedCollections: LittleDict import Random: bitrand import TestItems: @testitem import Unrolled: @unroll -export Composition -export ICN - -export aggregation_layer -export arithmetic_layer -export code -export comparison_layer +# SECTION - Exports +export hamming, minkowski, manhattan, weights_bias +export AbstractOptimizer, GeneticOptimizer, LocalSearchOptimizer, optimize! +export generate_configurations, explore_learn +export AbstractLayer, + Transformation, Aggregation, LayerCore, Arithmetic, Comparison, SimpleFilter, + PairedMap +export AbstractSolution, Solution, NonSolution, Configuration, Configurations, solutions +export AbstractICN, + check_weights_validity, generate_new_valid_weights, apply!, evaluate, ICN, create_icn export compose -export compose_to_file! -export composition -export composition_to_file! -export explore_learn_compose -export hamming -export incsert! -export lazy -export lazy_param -export learn_compose -export manhattan -export max_icn_length -export minkowski -export nbits -export optimize! -export regularization -export show_layers -export symbols -export transformation_layer -export weights -export weights! 
-export weights_bias - -# Include utils -include("utils.jl") -include("metrics.jl") -# Includes layers +# SECTION - Includes +# layers include("layer.jl") -include("layers/transformation.jl") -include("layers/arithmetic.jl") include("layers/aggregation.jl") +include("layers/arithmetic.jl") include("layers/comparison.jl") +include("layers/simple_filter.jl") +include("layers/pairedmap.jl") +include("layers/transformation.jl") -# Include ICN +# optimization +include("configuration.jl") include("icn.jl") -include("composition.jl") -include("learn.jl") +include("optimizer.jl") +include("learn_and_explore.jl") +include("metrics.jl") +include("compose.jl") end diff --git a/src/compose.jl b/src/compose.jl new file mode 100644 index 0000000..a9533c9 --- /dev/null +++ b/src/compose.jl @@ -0,0 +1,54 @@ +""" +Generate a julia function for a given ICN + +Example usage: +```julia +compose(ICN(), name = :hopefullyworkingfunction) +``` +""" +function compose( + icn::AbstractICN; + name::Symbol = gensym(), + jlfun = true, + fname = "", + dbg = false +) + f = JLFunction() + f.name = name + f.args = [:x] + f.kwargs = collect(icn.parameters) + + fns = [] + _start = 1 + weights = icn.weights.parent + for (i, layer) in enumerate(icn.layers) + j = findall(weights[_start:(_start - 1 + length(layer.fn))]) + if layer.mutex + push!(fns, :($(layer.name) = x = $(layer.fnexprs[j[1]].body))) + else + temp = xtuple([layer.fnexprs[k].body for k in j]...) + push!( + fns, + :( + $(layer.name) = x = $(temp) |> + ifelse( + isempty(x), r -> collect(typeof(x), r), collect) + ) + ) + end + if dbg + push!( + fns, + :(@info($(string(layer.name)), $(layer.name), typeof($(layer.name)))) + ) + end + _start += length(layer.fn) + end + f.body = Expr(:block, push!(fns, :(return x))...) + if !isempty(fname) + open(fname, "w") do fio + write(fio, sprint_expr(f)) + end + end + return (eval(codegen_ast(f)), jlfun ? f : codegen_ast(f)) +end diff --git a/src/composition.jl b/src/composition.jl deleted file mode 100644 index 6e200d1..0000000 --- a/src/composition.jl +++ /dev/null @@ -1,106 +0,0 @@ -""" - struct Composition{F<:Function} - -Store the all the information of a composition learned by an ICN. -""" -struct Composition{F<:Function} - code::Dict{Symbol,String} - f::F - symbols::Vector{Vector{Symbol}} -end - -""" - Composition(f::F, symbols) where {F<:Function} - -Construct a `Composition`. -""" -function Composition(f::F, symbols) where {F<:Function} - code = Dict{Symbol,String}() - return Composition{F}(code, f, symbols) -end - -""" - code(c::Composition, lang=:maths; name="composition") - -Access the code of a composition `c` in a given language `lang`. The name of the generated method is optional. -""" -function code(c::Composition, lang = :maths; name = "composition") - return get!(c.code, lang, generate(c, name, Val(lang))) -end - -""" - composition(c::Composition) - -Access the actual method of an ICN composition `c`. -""" -composition(c::Composition) = c.f - -""" - symbols(c::Composition) - -Output the composition as a layered collection of `Symbol`s. -""" -symbols(c::Composition) = c.symbols - -""" - compose(icn, weights=nothing) -Return a function composed by some of the operations of a given ICN. Can be applied to any vector of variables. If `weights` are given, will assign to `icn`. 
-""" -function compose(icn::ICN, weights::BitVector = BitVector()) - !isempty(weights) && weights!(icn, weights) - composition, symbols = _compose(icn) - return Composition(composition, symbols) -end - -""" - generate(c::Composition, name, lang) - -Generates the code of `c` in a specific language `lang`. -""" -function generate(c::Composition, name, ::Val{:maths}) - aux = map(s -> reduce_symbols(s, ", ", length(s) > 1), symbols(c)) - def = reduce_symbols(aux, " ∘ ", false) - return "$name = $def" -end - -function generate(c::Composition, name, ::Val{:Julia}) - symbs = symbols(c) - @assert length(symbs) == 4 "Length of the decomposition ≠ 4" - tr_length = length(symbs[1]) - - CN = "CompositionalNetworks." - tr = reduce_symbols(symbs[1], ", "; prefix = CN * "tr_") - ar = reduce_symbols(symbs[2], ", ", false; prefix = CN * "ar_") - ag = reduce_symbols(symbs[3], ", ", false; prefix = CN * "ag_") - co = reduce_symbols(symbs[4], ", ", false; prefix = CN * "co_") - - documentation = """\"\"\" - $name(x; X = zeros(length(x), $tr_length), params...) - - Composition `$name` generated by CompositionalNetworks.jl. - ``` - $(code(c; name)) - ``` - \"\"\" - """ - - output = """ - function $name(x; X = zeros(length(x), $tr_length), dom_size, params...) - $(CN)tr_in(Tuple($tr), X, x; params) - X[1:length(x), 1] .= 1:length(x) .|> (i -> $ar(@view X[i, 1:$tr_length])) - return $ag(@view X[:, 1]) |> (y -> $co(y; dom_size, nvars=length(x), params...)) - end - """ - return documentation * format_text(output, SciMLStyle(); pipe_to_function_call = false) -end - -""" - composition_to_file!(c::Composition, path, name, language=:Julia) - -Write the composition code in a given `language` into a file at `path`. -""" -function composition_to_file!(c::Composition, path, name, language = :Julia) - output = code(c, language; name) - write(path, output) - return nothing -end diff --git a/src/configuration.jl b/src/configuration.jl new file mode 100644 index 0000000..6e98fdb --- /dev/null +++ b/src/configuration.jl @@ -0,0 +1,17 @@ +abstract type AbstractSolution end + +struct Solution <: AbstractSolution + x::Any +end + +struct NonSolution <: AbstractSolution + x::Any +end + +const Configuration{T} = T where {T <: AbstractSolution} # alias + +const Configurations{N} = Set{<:Configuration} + +function solutions(x::Configurations; non_solutions = false) + Iterators.filter(r -> isa(r, ifelse(non_solutions, NonSolution, Solution)), x) +end diff --git a/src/icn.jl b/src/icn.jl index 2c21632..c71dc33 100644 --- a/src/icn.jl +++ b/src/icn.jl @@ -1,165 +1,285 @@ -""" - ICN(; nvars, dom_size, param, transformation, arithmetic, aggregation, comparison) -Construct an Interpretable Compositional Network, with the following arguments: -- `nvars`: number of variable in the constraint -- `dom_size: maximum domain size of any variable in the constraint` -- `param`: optional parameter (default to `nothing`) -- `transformation`: a transformation layer (optional) -- `arithmetic`: a arithmetic layer (optional) -- `aggregation`: a aggregation layer (optional) -- `comparison`: a comparison layer (optional) -""" -mutable struct ICN - transformation::Layer - arithmetic::Layer - aggregation::Layer - comparison::Layer - weights::BitVector +abstract type AbstractICN end - function ICN(; - param = Vector{Symbol}(), - tr_layer = transformation_layer(param), - ar_layer = arithmetic_layer(), - ag_layer = aggregation_layer(), - co_layer = comparison_layer(param), - ) - w = generate_weights([tr_layer, ar_layer, ag_layer, co_layer]) - return new(tr_layer, 
ar_layer, ag_layer, co_layer, w)
-    end
-end
+
+#=
+function extract_params(fnexprs, parameters)
+    v = falses(length(fnexprs))
+    keynames = keys(parameters)
+    for i in 1:length(fnexprs)
+        exprs = fnexprs[i].kwargs
+        v[i] = if exprs == [:(params...)]
+            true
+        else
+            flag = falses(length(exprs))
+            for j in 1:length(exprs)-1
+                for k in 1:length(keynames)
+                    has_symbol(exprs[j], keynames[k]) && (flag[j] = true)
+                end
+            end
+            !(false in flag)
+        end
+    end
+    return findall(v)
+end
+=#
 
-"""
-    layers(icn)
-Return the ordered layers of an ICN.
-"""
-layers(icn) = [icn.transformation, icn.arithmetic, icn.aggregation, icn.comparison]
-
-"""
-    Base.length(icn)
-Return the total number of operations of an ICN.
-"""
-Base.length(icn::ICN) = sum(length, layers(icn))
-
-"""
-    nbits(icn)
-Return the expected number of bits of a viable weight of an ICN.
-"""
-nbits(icn) = mapreduce(l -> exclu(l) ? nbits_exclu(l) : length(l), +, layers(icn))
-
-"""
-    weights(icn)
-Access the current set of weights of an ICN.
-"""
-weights(icn) = icn.weights
-
-function is_viable(icn::ICN, weights)
-    _start = 0
-    _end = 0
-
-    for layer in layers(icn)
-        _start = _end + 1
-        _end += exclu(layer) ? nbits_exclu(layer) : length(layer)
-
-        w = @view weights[_start:_end]
-
-        !is_viable(layer, w) && return false
+function check_weights_validity(icn::AbstractICN, weights::AbstractVector{Bool})
+    @assert length(weights) === sum(icn.weightlen)
+    offset = 1
+    for (i, layer) in enumerate(icn.layers)
+        index = offset:(offset + icn.weightlen[i] - 1)
+
+        # validate the candidate `weights`, not the weights currently stored in the ICN
+        flag = if layer.mutex
+            sum(weights[index]) == 1
+        else
+            sum(weights[index]) >= 1
+        end
+        if !flag
+            return false
+        end
+        offset += icn.weightlen[i]
     end
     return true
 end
 
-is_viable(icn::ICN) = is_viable(icn, weights(icn))
-
-"""
-    weights!(icn, weights)
-Set the weights of an ICN with a `BitVector`.
-"""
-function weights!(icn, weights)
-    length(weights) == nbits(icn) || @warn icn weights nbits(icn)
-    @assert length(weights) == nbits(icn)
-    return icn.weights = weights
+
+function generate_new_valid_weights(
+        layers::T,
+        weightlen::Vector{Int}
+) where {T <: AbstractVector{<:AbstractLayer}}
+    weights = Array{Bool}(undef, sum(weightlen))
+    offset = 1
+    for (i, layer) in enumerate(layers)
+        index = offset:(offset + weightlen[i] - 1)
+        # @info index weightlen[i] weights[offset]
+        weights[index] .= if layer.mutex
+            temp = falses(weightlen[i])
+            temp[rand(1:length(temp))] = true
+            temp
+        else
+            temp = rand(Bool, weightlen[i])
+            # a non-mutex layer must select at least one operation, otherwise
+            # the generated weights fail check_weights_validity
+            any(temp) || (temp[rand(1:length(temp))] = true)
+            temp
+        end
+        offset += weightlen[i]
+    end
+    return weights
 end
 
-"""
-    show_layers(icn)
-Return a formatted string with each layers in the icn.
-"""
-show_layers(icn) = map(show_layer, layers(icn))
-
-generate_weights(icn::ICN) = generate_weights(layers(icn))
-
-"""
-    regularization(icn)
-Return the regularization value of an ICN weights, which is proportional to the normalized number of operations selected in the icn layers.
-"""
-function regularization(icn)
-    Σmax = 0
-    Σop = 0
-    _start = 0
-    _end = 0
-    for layer in layers(icn)
-        l = length(layer)
-        _start = _end + 1
-        _end += exclu(layer) ?
nbits_exclu(layer) : l - if !exclu(layer) - Σop += selected_size(layer, @view weights(icn)[_start:_end]) - Σmax += length(layer) +function generate_new_valid_weights!(icn::T) where {T <: AbstractICN} + icn.weights .= generate_new_valid_weights(icn.layers, icn.weightlen) + nothing +end + +function apply!(icn::AbstractICN, weights::BitVector)::Bool + icn.weights .= weights + return check_weights_validity(icn, weights) +end + +function evaluate( + icn::AbstractICN, + config::Configuration; + weights_validity = true, + parameters... +) + if weights_validity + input = config.x + # @warn icn.weights icn.weightlen + weightoffset = 1 + lengthoff = 0 + for (i, layer) in enumerate(icn.layers) + weightrange = weightoffset:(weightoffset + icn.weightlen[i] - 1) + considerweights = icn.weights.indices[1][weightrange] .- lengthoff + + # @error considerweights findall(icn.weights[weightrange]) weightrange + considerweights = considerweights[findall(icn.weights[weightrange])] + + considerfns = [layer.fn[i] for i in considerweights] + output = nothing + # @info layer.name output layer.argtype[1] layer.argtype[2] input considerweights layer.mutex considerfns + input = layer.mutex ? considerfns[1](input; parameters...) : + [j(input; parameters...) for j in considerfns] + # @warn "What?" input + #input = output + weightoffset += icn.weightlen[i] + lengthoff += length(layer.fn) end + return Float64(input) + else + return Inf end - return Σop / (Σmax + 1) end -max_icn_length(icn = ICN(; param = [:val])) = length(icn.transformation) - -""" - _compose(icn) -Internal function called by `compose` and `show_composition`. -""" -function _compose(icn::ICN) - !is_viable(icn) && ( - return ( - (x; X = zeros(length(x), max_icn_length()), param = nothing, dom_size = 0) -> typemax(Float64) - ), - [] +function evaluate( + icns::Vector{<:AbstractICN}, + config::Configuration; + weights_validity = trues(length(icns)), + parameters... +) + evaluation_output = Array{Float64}(undef, length(icns)) + for (i, icn) in enumerate(icns) + # @info weights_validity[i], parameters, icn.parameters + evaluation_output[i] = evaluate( + icn, config; weights_validity = weights_validity[i], parameters...) + end + return sum(evaluation_output) / length(evaluation_output) +end + +function evaluate( + icn_validity::Pair{<:AbstractICN, Bool}, + config::Configuration; + parameters... +) + evaluate( + icn_validity[1], + config; + weights_validity = icn_validity[2], + icn_validity[1].constants..., + parameters... ) +end - funcs = Vector{Vector{Function}}() - symbols = Vector{Vector{Symbol}}() +function evaluate( + icns::Vector{Pair{<:AbstractICN, Bool}}, + config::Configuration; + parameters... +) + evaluation_output = Array{Float64}(undef, length(icns)) + vals = if haskey(parameters, :vals) + parameters[:vals] + else + nothing + end + param = Base.structdiff((; parameters...,), NamedTuple{(:vals,)}) + params = [(val = i, param...) for i in vals] - _start = 0 - _end = 0 + for (i, icn_validity) in enumerate(icns) + # @info weights_validity[i], parameters, icn.parameters + evaluation_output[i] = evaluate( + icn_validity[1], + config; + weights_validity = icn_validity[2], + icn_validity[1].constants..., + params[i]... 
+        )
+    end
+    return sum(evaluation_output) / length(evaluation_output)
+end
+
+#=
+function evaluate(icn::Nothing, config::Configuration)
+    return Inf
+end
+=#
+
+(icn::AbstractICN)(weights::BitVector) = apply!(icn, weights)
+(icn::AbstractICN)(config::Configuration) = evaluate(icn, config)
+
+# The subtype bound is written in the parameter list so that it is actually
+# enforced; a trailing `where` clause on the supertype does not constrain S.
+struct ICN{S <: Union{AbstractVector{<:AbstractLayer}, Nothing}} <: AbstractICN
+    weights::AbstractVector{Bool}
+    parameters::Set{Symbol}
+    layers::S
+    connection::Vector{UInt32}
+    weightlen::AbstractVector{Int}
+    constants::Dict
+    function ICN(;
+            weights = BitVector[],
+            parameters = Symbol[],
+            layers = [Transformation, Arithmetic, Aggregation, Comparison],
+            connection = UInt32[1, 2, 3, 4],
+            constants = Dict()
+    )
+        len = [length(layer.fn) for layer in layers]
+
+        parindexes = Vector{Int}[]
+        for layer in layers
+            lfn = Int[]
+            for (j, fn) in enumerate(layer.fn)
+                par = extract_parameters(
+                    fn,
+                    parameters = append!(
+                        copy(USUAL_CONSTRAINT_PARAMETERS),
+                        [:numvars, :dom_size, :op_filter, :filter_val]
+                    )
+                )
+                if !isempty(par)
+                    if intersect(par[1], parameters) == par[1]
+                        push!(lfn, j)
+                    end
+                else
+                    push!(lfn, j)
+                end
+            end
+            push!(parindexes, lfn)
+        end
+
+        # parindexes = [extract_params(layer.fnexprs, parameters) for layer in layers]
+        weightlen = length.(parindexes)
 
-    for layer in layers(icn)
-        _start = _end + 1
-        _end += exclu(layer) ? nbits_exclu(layer) : length(layer)
+        index, jindex = 0, 0
+        consider = Array{Int}(undef, sum(length.(parindexes)))
+        for (i, layer) in enumerate(layers)
+            consider[(1:length(parindexes[i])) .+ jindex] .= parindexes[i] .+ index
+            index += len[i]
+            jindex += length(parindexes[i])
+        end
 
-        if exclu(layer)
-            f_id = as_int(@view weights(icn)[_start:_end])
-            # @warn "debug" f_id _end _start weights(icn) (exclu(layer) ? "nbits_exclu(layer)" : "length(layer)") (@view weights(icn)[_start:_end])
-            s = symbol(layer, f_id + 1)
-            push!(funcs, [functions(layer)[s]])
-            push!(symbols, [s])
+        weights = if isempty(weights)
+            w = falses(sum(len))
+            #@info consider w generate_valid_weights(layers, weightlen)
+            w[consider] .= generate_new_valid_weights(layers, weightlen)
+            w
         else
-            layer_funcs = Vector{Function}()
-            layer_symbs = Vector{Symbol}()
-            for (f_id, b) in enumerate(@view weights(icn)[_start:_end])
-                if b
-                    s = symbol(layer, f_id)
-                    push!(layer_funcs, functions(layer)[s])
-                    push!(layer_symbs, s)
+            # Checking the provided weights for if they match mutex or not
+            # TODO: Ask Jefu if this is required or not
+            ####################
+            index = 0
+            for (i, layer) in enumerate(layers)
+                # shift the index range into the full weight vector; adding
+                # `index` to the Bool values themselves (as in
+                # `weights[1:len[i]] .+ index`) made this check fail for every
+                # mutex layer after the first
+                if layer.mutex && !(
+                    sum(weights[parindexes[i] .+ index]) == 1 &&
+                    sum(weights[(1:len[i]) .+ index]) == 1
+                )
+                    error("Invalid weights provided")
                 end
+                index += length(layer.fn)
             end
-            push!(funcs, layer_funcs)
-            push!(symbols, layer_symbs)
+            ####################
+            weights
         end
-    end
+        # @warn weights weights[consider]
+        @assert length(weights) === sum(len)
 
-    function composition(x; X = zeros(length(x), length(funcs[1])), dom_size, params...)
-        tr_in(Tuple(funcs[1]), X, x; params...)
- X[1:length(x), 1] .= - 1:length(x) .|> (i -> funcs[2][1](@view X[i, 1:length(funcs[1])])) - return (y -> funcs[4][1](y; dom_size, nvars = length(x), params...))( - funcs[3][1](@view X[:, 1]), + # @error consider + # @info parameters + new{typeof(layers)}( + @view(weights[consider]), + Set(parameters), + layers, + connection, + weightlen, + constants ) end +end - return composition, symbols +function regularization(icn::AbstractICN) + max_op = 0 + op = 0 + start = 1 + for (i, layer) in enumerate(icn.layers) + if !layer.mutex + op += length(findall(icn.weights[start:(start + icn.weightlen[i] - 1)])) + max_op += icn.weightlen[i] + end + start += icn.weightlen[i] + end + return op / (max_op + 1) end + +function create_icn(icn::ICN, parameters::Vector{Symbol}) + ICN( + weights = icn.weights, + parameters = parameters, + layers = icn.layers, + connection = icn.connection + ) +end + +#FIXME - This is a temporary fix +max_icn_length(args...; kargs...) = 42 diff --git a/src/layer.jl b/src/layer.jl index 18be36f..a492ba4 100644 --- a/src/layer.jl +++ b/src/layer.jl @@ -1,102 +1,33 @@ -""" - Layer -A structure to store a `LittleDict` of operations that can be selected during the learning phase of an ICN. If the layer is exclusive, only one operation can be selected at a time. -""" -struct Layer - exclusive::Bool - functions::LittleDict{Symbol,Function} - parameters::Vector{Symbol} -end - -""" - functions(layer) -Access the operations of a layer. The container is ordered. -""" -functions(layer) = layer.functions - -""" - length(layer) -Return the number of operations in a layer. -""" -Base.length(layer::Layer) = length(functions(layer)) - -""" - exclu(layer) -Return `true` if the layer has mutually exclusive operations. -""" -exclu(layer) = layer.exclusive - -""" - symbol(layer, i) -Return the i-th symbols of the operations in a given layer. -""" -symbol(layer, i) = begin - if i > length(layer) - @info layer i functions(layer) - end - collect(keys(functions(layer)))[i] -end - -""" - nbits_exclu(layer) -Convert the length of an exclusive layer into a number of bits. -""" -nbits_exclu(layer) = ceil(Int, log2(length(layer))) - -""" - show_layer(layer) -Return a string that contains the elements in a layer. -""" -show_layer(layer) = layer |> functions |> keys |> string - -""" - selected_size(layer, layer_weights) -Return the number of operations selected by `layer_weights` in `layer`. -""" -selected_size(layer, layer_weights) = exclu(layer) ? 1 : sum(layer_weights) - -""" - is_viable(layer, w) - is_viable(icn) - is_viable(icn, w) -Assert if a pair of layer/icn and weights compose a viable pattern. If no weights are given with an icn, it will check the current internal value. -""" -is_viable(layer::Layer, w) = exclu(layer) ? as_int(w) < length(layer) : any(w) - -""" - generate_inclusive_operations(predicate, bits) - generate_exclusive_operation(max_op_number) -Generates the operations (weights) of a layer with inclusive/exclusive operations. 
-""" -function generate_inclusive_operations(predicate, bits) - ind = falses(bits) - while true - ind = bitrand(bits) - predicate(ind) && break +abstract type AbstractLayer end + +# const AbstractLayerInput{T, N} = Union{AbstractVector{T}, NTuple{T, N}} # consider this in the future + +struct LayerCore <: AbstractLayer + name::Symbol + mutex::Bool + argtype::Pair + fnexprs::NamedTuple{ + names, T} where {names, T <: Tuple{Vararg{<:Union{Symbol, JLFunction}}}} + fn::NamedTuple{names, T} where {names, T <: Tuple{Vararg{Function}}} + function LayerCore(name::Symbol, mutex::Bool, Q::Pair, fnexprs) + fnexprs = map(x -> JLFunction(x), fnexprs) + for jlexp in fnexprs + #= + if isnothing(jlexp.rettype) + jlexp.rettype = Q[2] + end + =# + for (i, arg) in enumerate(jlexp.args) + if arg isa Symbol + jlexp.args[i] = Expr(:(::), arg, Q[1][i]) + end + end + if isnothing(jlexp.kwargs) + jlexp.kwargs = [:(params...)] + else + push!(jlexp.kwargs, :(params...)) + end + end + new(name, mutex, Q, fnexprs, map(x -> eval(codegen_ast(x)), fnexprs)) end - return ind -end - -""" - generate_exclusive_operation(max_op_number) -Generates the operations (weights) of a layer with exclusive operations. -""" -function generate_exclusive_operation(max_op_number) - op = rand(1:max_op_number) - return as_bitvector(op, max_op_number) -end - -""" - generate_weights(layers) - generate_weights(icn) -Generate the weights of a collection of layers or of an ICN. -""" -function generate_weights(layers) - bitvecs = map( - l -> - exclu(l) ? generate_exclusive_operation(length(l)) : - generate_inclusive_operations(any, length(l)), - layers, - ) - return vcat(bitvecs...) end diff --git a/src/layers/aggregation.jl b/src/layers/aggregation.jl index 2d287ee..0cfaf68 100644 --- a/src/layers/aggregation.jl +++ b/src/layers/aggregation.jl @@ -1,36 +1,42 @@ +const Aggregation = LayerCore( + :Aggregation, + true, + (:(AbstractVector{<:Real}),) => T where {T <: Real}, + ( + sum = :((x) -> sum(x)), + count_positive = :((x) -> count(i -> i > 0, x)), + count_op_val = :((x; val, op) -> count(i -> op(i, val), x)), + maximum = :((x) -> isempty(x) ? typemax(eltype(x)) : maximum(x)), + minimum = :((x) -> isempty(x) ? typemax(eltype(x)) : minimum(x)) + ) +) + +# SECTION - Docstrings to put back/update """ ag_sum(x) Aggregate through `+` a vector into a single scalar. """ -ag_sum(x) = sum(x) """ ag_count_positive(x) Count the number of strictly positive elements of `x`. """ -ag_count_positive(x) = count(y -> y > 0.0, x) """ aggregation_layer() Generate the layer of aggregations of the ICN. The operations are mutually exclusive, that is only one will be selected. 
""" -function aggregation_layer() - aggregations = - LittleDict{Symbol,Function}(:sum => ag_sum, :count_positive => ag_count_positive) - - return Layer(true, aggregations, Vector{Symbol}()) -end ## SECTION - Test Items -@testitem "Aggregation Layer" tags = [:aggregation, :layer] begin - CN = CompositionalNetworks +# @testitem "Aggregation Layer" tags = [:aggregation, :layer] begin +# CN = CompositionalNetworks - data = [[1, 5, 2, 4, 3] => 2, [1, 2, 3, 2, 1] => 2] +# data = [[1, 5, 2, 4, 3] => 2, [1, 2, 3, 2, 1] => 2] - @test CN.ag_sum(data[1].first) == 15 - @test CN.ag_sum(data[2].first) == 9 +# @test CN.ag_sum(data[1].first) == 15 +# @test CN.ag_sum(data[2].first) == 9 - @test CN.ag_count_positive(data[1].first) == 5 - @test CN.ag_count_positive(data[2].first) == 5 - @test CN.ag_count_positive([1, 0, 1, 0, 1]) == 3 -end +# @test CN.ag_count_positive(data[1].first) == 5 +# @test CN.ag_count_positive(data[2].first) == 5 +# @test CN.ag_count_positive([1, 0, 1, 0, 1]) == 3 +# end diff --git a/src/layers/arithmetic.jl b/src/layers/arithmetic.jl index 76f42b0..b29a0ac 100644 --- a/src/layers/arithmetic.jl +++ b/src/layers/arithmetic.jl @@ -1,32 +1,33 @@ +const Arithmetic = LayerCore( + :Arithmetic, + true, + (:(AbstractVector{<:AbstractVector{<:Real}}),) => AbstractVector{<:Real}, + (sum = :((x) -> sum(x)), product = :((x) -> reduce((t...) -> broadcast(*, t...), x))) +) + +# SECTION - Docstrings to put back/update """ ar_sum(x) Reduce `k = length(x)` vectors through sum to a single vector. """ -ar_sum(x) = sum(x) """ ar_prod(x) Reduce `k = length(x)` vectors through product to a single vector. """ -ar_prod(x) = reduce((y, z) -> y .* z, x) """ arithmetic_layer() Generate the layer of arithmetic operations of the ICN. The operations are mutually exclusive, that is only one will be selected. """ -function arithmetic_layer() - arithmetics = LittleDict{Symbol,Function}(:sum => ar_sum, :prod => ar_prod) - - return Layer(true, arithmetics, Vector{Symbol}()) -end ## SECTION - Test Items -@testitem "Arithmetic Layer" tags = [:arithmetic, :layer] begin - CN = CompositionalNetworks +# @testitem "Arithmetic Layer" tags = [:arithmetic, :layer] begin +# CN = CompositionalNetworks - data = [[1, 5, 2, 4, 3] => 2, [1, 2, 3, 2, 1] => 2] +# data = [[1, 5, 2, 4, 3] => 2, [1, 2, 3, 2, 1] => 2] - @test CN.ar_sum(map(p -> p.first, data)) == [2, 7, 5, 6, 4] - @test CN.ar_prod(map(p -> p.first, data)) == [1, 10, 6, 8, 3] +# @test CN.ar_sum(map(p -> p.first, data)) == [2, 7, 5, 6, 4] +# @test CN.ar_prod(map(p -> p.first, data)) == [1, 10, 6, 8, 3] -end +# end diff --git a/src/layers/comparison.jl b/src/layers/comparison.jl index 5f5bed5..6398a86 100644 --- a/src/layers/comparison.jl +++ b/src/layers/comparison.jl @@ -1,159 +1,153 @@ +const Comparison = LayerCore( + :Comparison, + true, + (:(Real),) => Real, + ( + id = :((x) -> identity(x)), + abs_val = :((x; val) -> abs(x - val)), + val_minus_var = :((x; val) -> maximum((0, val - x))), + var_minus_val = :((x; val) -> maximum((0, x - val))), + euclidean_val = :( + (x; val, dom_size) -> x == val ? 0 : (1 + (abs(x - val) / dom_size)) + ), + euclidean_val_op = :( + (x; op, val, dom_size) -> op(x, val) ? 0 : (1 + (abs(x - val) / dom_size)) + ), + euclidean = :((x; dom_size) -> x == 0 ? 0 : (1 + (x / dom_size))), + euclidean_op = :((x; op, dom_size) -> op(x, 0) ? 
0 : (1 + (x / dom_size))), + var_minus_numvars = :((x; numvars) -> abs(x - numvars)), + max_numvars_minus_var = :((x; numvars) -> maximum((0, numvars - x))), + max_var_minus_numvars = :((x; numvars) -> maximum((x - numvars, 0))), + vals_minus_var_gele = :( + (x; + vals) -> length(vals) != 2 ? typemax(eltype(x)) : + ( + vals[1] <= x <= vals[2] ? 0 : + minimum((abs(x - vals[1]), abs(x - vals[2]))) + ) + ), + vals_minus_var_gl = :( + (x; + vals) -> length(vals) != 2 ? typemax(eltype(x)) : + (vals[1] < x < vals[2] ? 0 : + minimum((abs(x - vals[1]), abs(x - vals[2])))) + ) # var_minus_val=:((x; vals) -> maximum((0, (x .- vals)...))), # euclidean_val=:((x; vals, dom_size) -> x in vals ? 0 : (1 + (abs((length(vals) * x) - sum(vals)) / dom_size))), + ) +) + +# TODO: Add more operations in comparison + +# SECTION - Docstrings to put back/update """ co_identity(x) Identity function. Already defined in Julia as `identity`, specialized for scalars in the `comparison` layer. """ -co_identity(x; params...) = identity(x) """ co_abs_diff_var_val(x; val) Return the absolute difference between `x` and `val`. """ -co_abs_diff_var_val(x; val, params...) = abs(x - val) """ co_var_minus_val(x; val) Return the difference `x - val` if positive, `0.0` otherwise. """ -co_var_minus_val(x; val, params...) = max(0.0, x - val) """ co_val_minus_var(x; val) Return the difference `val - x` if positive, `0.0` otherwise. """ -co_val_minus_var(x; val, params...) = max(0.0, val - x) """ co_euclidean_val(x; val, dom_size) Compute an euclidean norm with domain size `dom_size`, weighted by `val`, of a scalar. """ -function co_euclidean_val(x; val, dom_size, params...) - return x == val ? 0.0 : (1.0 + abs(x - val) / dom_size) -end """ co_euclidean(x; dom_size) Compute an euclidean norm with domain size `dom_size` of a scalar. """ -function co_euclidean(x; dom_size, params...) - return co_euclidean_val(x; val = 0.0, dom_size) -end """ co_abs_diff_var_vars(x; nvars) Return the absolute difference between `x` and the number of variables `nvars`. """ -co_abs_diff_var_vars(x; nvars, params...) = abs(x - nvars) """ co_var_minus_vars(x; nvars) Return the difference `x - nvars` if positive, `0.0` otherwise, where `nvars` denotes the numbers of variables. """ -co_var_minus_vars(x; nvars, params...) = co_var_minus_val(x; val = nvars) """ co_vars_minus_var(x; nvars) Return the difference `nvars - x` if positive, `0.0` otherwise, where `nvars` denotes the numbers of variables. """ -co_vars_minus_var(x; nvars, params...) = co_val_minus_var(x; val = nvars) - -# Parametric layers """ make_comparisons(param::Symbol) Generate the comparison functions for the given parameter. """ -make_comparisons(param::Symbol) = make_comparisons(Val(param)) - -function make_comparisons(::Val{:none}) - return LittleDict{Symbol,Function}( - :identity => co_identity, - :euclidean => co_euclidean, - :abs_diff_var_vars => co_abs_diff_var_vars, - :var_minus_vars => co_var_minus_vars, - :vars_minus_var => co_vars_minus_var, - ) -end - -function make_comparisons(::Val{:val}) - return LittleDict{Symbol,Function}( - :abs_diff_var_val => co_abs_diff_var_val, - :var_minus_val => co_var_minus_val, - :val_minus_var => co_val_minus_var, - :euclidean_val => co_euclidean_val, - ) -end - """ comparison_layer(param = false) Generate the layer of transformations functions of the ICN. Iff `param` value is set, also includes all the parametric comparison with that value. The operations are mutually exclusive, that is only one will be selected. 
""" -function comparison_layer(parameters = Vector{Symbol}()) - comparisons = make_comparisons(:none) - - for p in parameters - comparisons_param = make_comparisons(p) - comparisons = LittleDict{Symbol,Function}(union(comparisons, comparisons_param)) - end - - return Layer(true, comparisons, parameters) -end ## SECTION - Test Items -@testitem "Comparison Layer" tags = [:comparison, :layer] begin - CN = CompositionalNetworks - - data = [3 => (1, 5), 5 => (10, 5)] - - funcs = [CN.co_identity => [3, 5]] - - # test no param/vars - for (f, results) in funcs - for (key, vals) in enumerate(data) - @test f(vals.first) == results[key] - end - end - - funcs_param = [ - CN.co_abs_diff_var_val => [2, 5], - CN.co_var_minus_val => [2, 0], - CN.co_val_minus_var => [0, 5], - ] - - for (f, results) in funcs_param - for (key, vals) in enumerate(data) - @test f(vals.first; val = vals.second[1]) == results[key] - end - end - - funcs_vars = [ - CN.co_abs_diff_var_vars => [2, 0], - CN.co_var_minus_vars => [0, 0], - CN.co_vars_minus_var => [2, 0], - ] - - for (f, results) in funcs_vars - for (key, vals) in enumerate(data) - @test f(vals.first, nvars = vals.second[2]) == results[key] - end - end - - funcs_val_dom = [CN.co_euclidean_val => [1.4, 2.0]] - - for (f, results) in funcs_val_dom - for (key, vals) in enumerate(data) - @test f(vals.first, val = vals.second[1], dom_size = vals.second[2]) ≈ - results[key] - end - end - - funcs_dom = [CN.co_euclidean => [1.6, 2.0]] - - for (f, results) in funcs_dom - for (key, vals) in enumerate(data) - @test f(vals.first, dom_size = vals.second[2]) ≈ results[key] - end - end - -end +# @testitem "Comparison Layer" tags = [:comparison, :layer] begin +# CN = CompositionalNetworks + +# data = [3 => (1, 5), 5 => (10, 5)] + +# funcs = [CN.co_identity => [3, 5]] + +# # test no param/vars +# for (f, results) in funcs +# for (key, vals) in enumerate(data) +# @test f(vals.first) == results[key] +# end +# end + +# funcs_param = [ +# CN.co_abs_diff_var_val => [2, 5], +# CN.co_var_minus_val => [2, 0], +# CN.co_val_minus_var => [0, 5], +# ] + +# for (f, results) in funcs_param +# for (key, vals) in enumerate(data) +# @test f(vals.first; val = vals.second[1]) == results[key] +# end +# end + +# funcs_vars = [ +# CN.co_abs_diff_var_vars => [2, 0], +# CN.co_var_minus_vars => [0, 0], +# CN.co_vars_minus_var => [2, 0], +# ] + +# for (f, results) in funcs_vars +# for (key, vals) in enumerate(data) +# @test f(vals.first, nvars = vals.second[2]) == results[key] +# end +# end + +# funcs_val_dom = [CN.co_euclidean_val => [1.4, 2.0]] + +# for (f, results) in funcs_val_dom +# for (key, vals) in enumerate(data) +# @test f(vals.first, val = vals.second[1], dom_size = vals.second[2]) ≈ +# results[key] +# end +# end + +# funcs_dom = [CN.co_euclidean => [1.6, 2.0]] + +# for (f, results) in funcs_dom +# for (key, vals) in enumerate(data) +# @test f(vals.first, dom_size = vals.second[2]) ≈ results[key] +# end +# end + +# end diff --git a/src/layers/pairedmap.jl b/src/layers/pairedmap.jl new file mode 100644 index 0000000..f54ef00 --- /dev/null +++ b/src/layers/pairedmap.jl @@ -0,0 +1,11 @@ +const PairedMap = LayerCore( + :PairedMap, + true, + (:(AbstractVector{<:Real}),) => AbstractVector{<:Real}, + ( + id = :((x) -> identity(x)), + sub = :((x; pair_vars) -> abs.(x .- pair_vars)), + sum = :((x; pair_vars) -> (x .+ pair_vars)), + prod = :((x; pair_vars) -> (x .* pair_vars)) + ) +) diff --git a/src/layers/simple_filter.jl b/src/layers/simple_filter.jl new file mode 100644 index 0000000..37b5f5f --- /dev/null 
+++ b/src/layers/simple_filter.jl @@ -0,0 +1,23 @@ +const SimpleFilter = LayerCore( + :SimpleFilter, + true, + (:(AbstractVector{<:Real}),) => AbstractVector{<:Real}, + ( + id = :((x) -> identity(x)), + filter_unique = :((x) -> unique(x)), + filter_elem = :((x; id) -> [x[id]]), + filter_op_val = :((x; val, op) -> filter(t -> op(t, val), x)), + filter_id = :( + (x; id) -> [x[id], 0 < x[id] <= length(x) ? -x[x[id]] : typemax(eltype(x))] + ), + filter_equal_val = :((x; val) -> filter(t -> t == val, x)), + filter_ge_val = :((x; val) -> filter(t -> t >= val, x)), + filter_great_val = :((x; val) -> filter(t -> t > val, x)), + filter_less_val = :((x; val) -> filter(t -> t < val, x)), + filter_le_val = :((x; val) -> filter(t -> t <= val, x)), + filter_ne_val = :((x; val) -> filter(t -> t != val, x)), + filter_op_vals = :((x; vals, op) -> filter(t -> prod(op.(t, vals)), x)), + filter_equal_vals = :((x; vals) -> filter(t -> t in vals, x)), + filter_ne_vals = :((x; vals) -> filter(t -> !(t in vals), x)) + ) +) diff --git a/src/layers/transformation.jl b/src/layers/transformation.jl index b9f33d6..9a4a007 100644 --- a/src/layers/transformation.jl +++ b/src/layers/transformation.jl @@ -1,4 +1,56 @@ -# Identity +const Transformation = LayerCore( + :Transformation, + false, + (:(AbstractVector{<:Real}),) => AbstractVector{<:Real}, + ( + id = :((x) -> identity(x)), + count_equal_right = :( + (x) -> map(i -> count(t -> t == x[i], @view(x[(i + 1):end])), eachindex(x)) + ), + count_less_right = :( + (x) -> map(i -> count(t -> t < x[i], @view(x[(i + 1):end])), eachindex(x)) + ), + count_great_right = :( + (x) -> map(i -> count(t -> t > x[i], @view(x[(i + 1):end])), eachindex(x)) + ), + count_equal_left = :( + (x) -> map(i -> count(t -> t == x[i], @view(x[1:(i - 1)])), eachindex(x)) + ), + count_less_left = :( + (x) -> map(i -> count(t -> t < x[i], @view(x[1:(i - 1)])), eachindex(x)) + ), + count_great_left = :( + (x) -> map(i -> count(t -> t > x[i], @view(x[1:(i - 1)])), eachindex(x)) + ), + count_equal_val = :((x; val) -> map(i -> count(t -> t == (i + val), x), x)), + count_less_val = :((x; val) -> map(i -> count(t -> t < (i + val), x), x)), + count_great_val = :((x; val) -> map(i -> count(t -> t > (i + val), x), x)), + var_minus_val = :((x; val) -> map(i -> max(0, i - val), x)), + val_minus_var = :((x; val) -> map(i -> max(0, val - i), x)), + contiguous_vars_minus = :( + (x) -> map( + i -> i == length(x) ? 0 : max(0, x[i] - x[i + 1]), + eachindex(x[1:end]) + ) + ), + contiguous_vars_minus_rev = :( + (x) -> map( + i -> i == length(x) ? 0 : max(0, x[i + 1] - x[i]), + eachindex(x[1:end]) + ) + ), + count_equal = :((x) -> map(i -> count(t -> t == i, x), x)), + count_less = :((x) -> map(i -> count(t -> t < i, x), x)), + count_great = :((x) -> map(i -> count(t -> t > i, x), x)), + count_bounding_val = :( + (x; val) -> map(i -> count(t -> t >= i && t <= i + val, x), x) + ), + var_minus_vals = :((x; vals) -> map(i -> max(0, (i .- vals)...), x)), + vals_minus_var = :((x; vals) -> map(i -> max(0, (vals .- i)...), x)) + ) +) + +# SECTION - Docstrings to put back/update """ tr_identity(i, x) @@ -8,10 +60,6 @@ Identity function. Already defined in Julia as `identity`, specialized for vectors. When `X` is provided, the result is computed without allocations. """ -tr_identity(i, x; params...) = identity(x[i]) -lazy(tr_identity) - -# Count equalities """ tr_count_eq(i, x) @@ -21,7 +69,6 @@ lazy(tr_identity) Count the number of elements equal to `x[i]`. Extended method to vector with sig `(x)` are generated. 
When `X` is provided, the result is computed without allocations. """ -tr_count_eq(i, x; params...) = count(y -> x[i] == y, x) - 1 """ tr_count_eq_right(i, x) @@ -31,7 +78,6 @@ tr_count_eq(i, x; params...) = count(y -> x[i] == y, x) - 1 Count the number of elements to the right of and equal to `x[i]`. Extended method to vector with sig `(x)` are generated. When `X` is provided, the result is computed without allocations. """ -tr_count_eq_right(i, x; params...) = tr_count_eq(1, @view x[i:end]) """ tr_count_eq_left(i, x) @@ -41,12 +87,6 @@ tr_count_eq_right(i, x; params...) = tr_count_eq(1, @view x[i:end]) Count the number of elements to the left of and equal to `x[i]`. Extended method to vector with sig `(x)` are generated. When `X` is provided, the result is computed without allocations. """ -tr_count_eq_left(i, x; params...) = tr_count_eq(i, @view x[1:i]) - -# Generating vetorized versions -lazy(tr_count_eq, tr_count_eq_left, tr_count_eq_right) - -# Count greater/lesser """ tr_count_greater(i, x) @@ -56,7 +96,6 @@ lazy(tr_count_eq, tr_count_eq_left, tr_count_eq_right) Count the number of elements greater than `x[i]`. Extended method to vector with sig `(x)` are generated. When `X` is provided, the result is computed without allocations. """ -tr_count_greater(i, x; params...) = count(y -> x[i] < y, x) """ tr_count_lesser(i, x) @@ -66,7 +105,6 @@ tr_count_greater(i, x; params...) = count(y -> x[i] < y, x) Count the number of elements lesser than `x[i]`. Extended method to vector with sig `(x)` are generated. When `X` is provided, the result is computed without allocations. """ -tr_count_lesser(i, x; params...) = count(y -> x[i] > y, x) """ tr_count_g_left(i, x) @@ -76,7 +114,6 @@ tr_count_lesser(i, x; params...) = count(y -> x[i] > y, x) Count the number of elements to the left of and greater than `x[i]`. Extended method to vector with sig `(x)` are generated. When `X` is provided, the result is computed without allocations. """ -tr_count_g_left(i, x; params...) = tr_count_greater(i, @view x[1:i]) """ tr_count_l_left(i, x) @@ -86,7 +123,6 @@ tr_count_g_left(i, x; params...) = tr_count_greater(i, @view x[1:i]) Count the number of elements to the left of and lesser than `x[i]`. Extended method to vector with sig `(x)` are generated. When `X` is provided, the result is computed without allocations. """ -tr_count_l_left(i, x; params...) = tr_count_lesser(i, @view x[1:i]) """ tr_count_g_right(i, x) @@ -95,7 +131,6 @@ tr_count_l_left(i, x; params...) = tr_count_lesser(i, @view x[1:i]) Count the number of elements to the right of and greater than `x[i]`. Extended method to vector with sig `(x)` are generated. """ -tr_count_g_right(i, x; params...) = tr_count_greater(1, @view x[i:end]) """ tr_count_l_right(i, x) @@ -105,13 +140,6 @@ tr_count_g_right(i, x; params...) = tr_count_greater(1, @view x[i:end]) Count the number of elements to the right of and lesser than `x[i]`. Extended method to vector with sig `(x)` are generated. When `X` is provided, the result is computed without allocations. """ -tr_count_l_right(i, x; params...) = tr_count_lesser(1, @view x[i:end]) - -# Generating vetorized versions -lazy(tr_count_greater, tr_count_g_left, tr_count_g_right) -lazy(tr_count_lesser, tr_count_l_left, tr_count_l_right) - -# Count val parameter """ tr_count_eq_val(i, x; val) @@ -121,7 +149,6 @@ lazy(tr_count_lesser, tr_count_l_left, tr_count_l_right) Count the number of elements equal to `x[i] + val`. Extended method to vector with sig `(x, val)` are generated. 
 When `X` is provided, the result is computed without allocations.
 """
-tr_count_eq_val(i, x; val, params...) = count(y -> y == x[i] + val, x)
 
 """
     tr_count_l_val(i, x; val)
@@ -131,7 +158,6 @@ tr_count_eq_val(i, x; val, params...) = count(y -> y == x[i] + val, x)
 Count the number of elements lesser than `x[i] + val`. Extended method to vector with sig `(x, val)` are generated.
 When `X` is provided, the result is computed without allocations.
 """
-tr_count_l_val(i, x; val, params...) = count(y -> y < x[i] + val, x)
 
 """
     tr_count_g_val(i, x; val)
@@ -141,12 +167,6 @@ tr_count_l_val(i, x; val, params...) = count(y -> y < x[i] + val, x)
 Count the number of elements greater than `x[i] + val`. Extended method to vector with sig `(x, val)` are generated.
 When `X` is provided, the result is computed without allocations.
 """
-tr_count_g_val(i, x; val, params...) = count(y -> y > x[i] + val, x)
-
-# Generating vectorized versions
-lazy_param(tr_count_eq_val, tr_count_l_val, tr_count_g_val)
-
-# Bounding val parameter
 """
     tr_count_bounding_val(i, x; val)
@@ -156,12 +176,6 @@ lazy_param(tr_count_eq_val, tr_count_l_val, tr_count_g_val)
 Count the number of elements bounded (not strictly) by `x[i]` and `x[i] + val`. An extended method to vector with sig `(x, val)` is generated.
 When `X` is provided, the result is computed without allocations.
 """
-tr_count_bounding_val(i, x; val, params...) = count(y -> x[i] ≤ y ≤ x[i] + val, x)
-
-# Generating vectorized versions
-lazy_param(tr_count_bounding_val)
-
-# Variable/parameter values subtractions
 """
     tr_var_minus_val(i, x; val)
@@ -171,7 +185,6 @@ lazy_param(tr_count_bounding_val)
 Return the difference `x[i] - val` if positive, `0.0` otherwise. Extended method to vector with sig `(x, val)` are generated.
 When `X` is provided, the result is computed without allocations.
 """
-tr_var_minus_val(i, x; val, params...) = max(0, x[i] - val)
 
 """
     tr_val_minus_var(i, x; val)
@@ -181,12 +194,7 @@ tr_var_minus_val(i, x; val, params...) = max(0, x[i] - val)
 Return the difference `val - x[i]` if positive, `0.0` otherwise. Extended method to vector with sig `(x, val)` are generated.
 When `X` is provided, the result is computed without allocations.
 """
-tr_val_minus_var(i, x; val, params...) = max(0, val - x[i])
-
-# Generating vetorized versions
-lazy_param(tr_var_minus_val, tr_val_minus_var)
-# Continuous values subtraction
 """
     tr_contiguous_vars_minus(i, x)
     tr_contiguous_vars_minus(x)
@@ -195,8 +203,6 @@ lazy_param(tr_var_minus_val, tr_val_minus_var)
 Return the difference `x[i] - x[i + 1]` if positive, `0.0` otherwise. Extended method to vector with sig `(x)` are generated.
 When `X` is provided, the result is computed without allocations.
 """
-tr_contiguous_vars_minus(i, x; params...) =
-    length(x) == i ? 0 : tr_var_minus_val(i, x; val = x[i+1])
 
 """
     tr_contiguous_vars_minus_rev(i, x)
@@ -206,12 +212,6 @@ tr_contiguous_vars_minus(i, x; params...) =
 Return the difference `x[i + 1] - x[i]` if positive, `0.0` otherwise. Extended method to vector with sig `(x)` are generated.
 When `X` is provided, the result is computed without allocations.
 """
-function tr_contiguous_vars_minus_rev(i, x; params...)
-    return length(x) == i ?
-        0 : tr_val_minus_var(i, x; val = x[i+1])
-end
-
-# Generating vetorized versions
-lazy(tr_contiguous_vars_minus, tr_contiguous_vars_minus_rev)
 
 """
     make_transformations(param::Symbol)
@@ -260,100 +260,56 @@ val_transforms = make_transformations(:val)
 count_eq_param_result = val_transforms[:count_eq_param](data, param)
 ```
 """
-make_transformations(param::Symbol) = make_transformations(Val(param))
-
-function make_transformations(::Val{:none})
-    return LittleDict{Symbol,Function}(
-        :identity => tr_identity,
-        :count_eq => tr_count_eq,
-        :count_eq_left => tr_count_eq_left,
-        :count_eq_right => tr_count_eq_right,
-        :count_greater => tr_count_greater,
-        :count_lesser => tr_count_lesser,
-        :count_g_left => tr_count_g_left,
-        :count_l_left => tr_count_l_left,
-        :count_g_right => tr_count_g_right,
-        :count_l_right => tr_count_l_right,
-        :contiguous_vars_minus => tr_contiguous_vars_minus,
-        :contiguous_vars_minus_rev => tr_contiguous_vars_minus_rev,
-    )
-end
-
-function make_transformations(::Val{:val})
-    return LittleDict{Symbol,Function}(
-        :count_eq_val => tr_count_eq_val,
-        :count_l_val => tr_count_l_val,
-        :count_g_val => tr_count_g_val,
-        :count_bounding_val => tr_count_bounding_val,
-        :var_minus_val => tr_var_minus_val,
-        :val_minus_var => tr_val_minus_var,
-    )
-end
-
-function make_transformations(::Val)
-    return LittleDict{Symbol,Function}()
-end
-
 """
     transformation_layer(param = Vector{Symbol}())
 
 Generate the layer of transformations functions of the ICN. Iff `param` value is non empty, also includes all the related parametric transformations.
 """
-function transformation_layer(parameters = Vector{Symbol}())
-    transformations = make_transformations(:none)
-
-    for p in parameters
-        transformations_param = make_transformations(p)
-        transformations = LittleDict(union(transformations, transformations_param))
-    end
-
-    return Layer(false, transformations, parameters)
-end
 
 ## SECTION - Test Items
 
-@testitem "Transformation Layer" tags = [:transformation, :layer] begin
-    CN = CompositionalNetworks
-
-    data = [[1, 5, 2, 4, 3] => 2, [1, 2, 3, 2, 1] => 2]
-
-    # Test transformations without parameters
-    funcs = Dict(
-        CN.tr_identity => [data[1].first, data[2].first],
-        CN.tr_count_eq => [[0, 0, 0, 0, 0], [1, 1, 0, 1, 1]],
-        CN.tr_count_eq_right => [[0, 0, 0, 0, 0], [1, 1, 0, 0, 0]],
-        CN.tr_count_eq_left => [[0, 0, 0, 0, 0], [0, 0, 0, 1, 1]],
-        CN.tr_count_greater => [[4, 0, 3, 1, 2], [3, 1, 0, 1, 3]],
-        CN.tr_count_lesser => [[0, 4, 1, 3, 2], [0, 2, 4, 2, 0]],
-        CN.tr_count_g_left => [[0, 0, 1, 1, 2], [0, 0, 0, 1, 3]],
-        CN.tr_count_l_left => [[0, 1, 1, 2, 2], [0, 1, 2, 1, 0]],
-        CN.tr_count_g_right => [[4, 0, 2, 0, 0], [3, 1, 0, 0, 0]],
-        CN.tr_count_l_right => [[0, 3, 0, 1, 0], [0, 1, 2, 1, 0]],
-        CN.tr_contiguous_vars_minus => [[0, 3, 0, 1, 0], [0, 0, 1, 1, 0]],
-        CN.tr_contiguous_vars_minus_rev => [[4, 0, 2, 0, 0], [1, 1, 0, 0, 0]],
-    )
-
-    for (f, results) in funcs
-        for (key, vals) in enumerate(data)
-            @test f(vals.first) == results[key]
-            foreach(i -> f(i, vals.first), vals.first)
-        end
-    end
-
-    # Test transformations with parameter
-    funcs_val = Dict(
-        CN.tr_count_eq_val => [[1, 0, 1, 0, 1], [1, 0, 0, 0, 1]],
-        CN.tr_count_l_val => [[2, 5, 3, 5, 4], [4, 5, 5, 5, 4]],
-        CN.tr_count_g_val => [[2, 0, 1, 0, 0], [0, 0, 0, 0, 0]],
-        CN.tr_count_bounding_val => [[3, 1, 3, 2, 3], [5, 3, 1, 3, 5]],
-        CN.tr_var_minus_val => [[0, 3, 0, 2, 1], [0, 0, 1, 0, 0]],
-        CN.tr_val_minus_var => [[1, 0, 0, 0, 0], [1, 0, 0, 0, 1]],
-    )
-
-    for (f, results) in funcs_val
-        for (key, vals) in enumerate(data)
-            @test f(vals.first; val = vals.second) == results[key]
-            foreach(i -> f(i, vals.first; val = vals.second), vals.first)
-        end
-    end
-
-end
+# @testitem "Transformation Layer" tags = [:transformation, :layer] begin
+#     CN = CompositionalNetworks
+
+#     data = [[1, 5, 2, 4, 3] => 2, [1, 2, 3, 2, 1] => 2]
+
+#     # Test transformations without parameters
+#     funcs = Dict(
+#         CN.tr_identity => [data[1].first, data[2].first],
+#         CN.tr_count_eq => [[0, 0, 0, 0, 0], [1, 1, 0, 1, 1]],
+#         CN.tr_count_eq_right => [[0, 0, 0, 0, 0], [1, 1, 0, 0, 0]],
+#         CN.tr_count_eq_left => [[0, 0, 0, 0, 0], [0, 0, 0, 1, 1]],
+#         CN.tr_count_greater => [[4, 0, 3, 1, 2], [3, 1, 0, 1, 3]],
+#         CN.tr_count_lesser => [[0, 4, 1, 3, 2], [0, 2, 4, 2, 0]],
+#         CN.tr_count_g_left => [[0, 0, 1, 1, 2], [0, 0, 0, 1, 3]],
+#         CN.tr_count_l_left => [[0, 1, 1, 2, 2], [0, 1, 2, 1, 0]],
+#         CN.tr_count_g_right => [[4, 0, 2, 0, 0], [3, 1, 0, 0, 0]],
+#         CN.tr_count_l_right => [[0, 3, 0, 1, 0], [0, 1, 2, 1, 0]],
+#         CN.tr_contiguous_vars_minus => [[0, 3, 0, 1, 0], [0, 0, 1, 1, 0]],
+#         CN.tr_contiguous_vars_minus_rev => [[4, 0, 2, 0, 0], [1, 1, 0, 0, 0]],
+#     )
+
+#     for (f, results) in funcs
+#         for (key, vals) in enumerate(data)
+#             @test f(vals.first) == results[key]
+#             foreach(i -> f(i, vals.first), vals.first)
+#         end
+#     end
+
+#     # Test transformations with parameter
+#     funcs_val = Dict(
+#         CN.tr_count_eq_val => [[1, 0, 1, 0, 1], [1, 0, 0, 0, 1]],
+#         CN.tr_count_l_val => [[2, 5, 3, 5, 4], [4, 5, 5, 5, 4]],
+#         CN.tr_count_g_val => [[2, 0, 1, 0, 0], [0, 0, 0, 0, 0]],
+#         CN.tr_count_bounding_val => [[3, 1, 3, 2, 3], [5, 3, 1, 3, 5]],
+#         CN.tr_var_minus_val => [[0, 3, 0, 2, 1], [0, 0, 1, 0, 0]],
+#         CN.tr_val_minus_var => [[1, 0, 0, 0, 0], [1, 0, 0, 0, 1]],
+#     )
+
+#     for (f, results) in funcs_val
+#         for (key, vals) in enumerate(data)
+#             @test f(vals.first; val = vals.second) == results[key]
+#             foreach(i -> f(i, vals.first; val = vals.second), vals.first)
+#         end
+#     end
+
+# end
diff --git a/src/learn_and_explore.jl b/src/learn_and_explore.jl
new file mode 100644
index 0000000..92ee378
--- /dev/null
+++ b/src/learn_and_explore.jl
@@ -0,0 +1,63 @@
+function generate_configurations(
+    concept::Function,
+    domains::Vector{<:DiscreteDomain};
+    parameters...
+)::Configurations
+    output = explore(domains, concept; parameters...)
+    Set([Solution.(collect.(output[1]))..., NonSolution.(collect.(output[2]))...])
+end
+
+function explore_learn(
+    domains::Vector{<:DiscreteDomain},
+    concept::Function,
+    optimizer_config::T;
+    icn = ICN(; parameters = [:dom_size, :num_variables]),
+    configurations = nothing,
+    metric_function = [hamming, manhattan],
+    parameters...
+) where {T <: AbstractOptimizer}
+    #=
+    if :vals in icn.parameters && haskey(parameters, :vals)
+        vals = parameters[:vals]
+        param = Base.structdiff((; parameters...,), NamedTuple{(:vals,)})
+        params = [(val = i, param...) for i in vals]
+
+        new_icn = deepcopy(icn)
+        delete!(new_icn.parameters.dict, :vals)
+        push!(new_icn.parameters, :val)
+
+        p = Pair{<:AbstractICN, Bool}[]
+        Threads.@threads for i in 1:length(params)
+
+            if isnothing(configurations)
+                concept_new = ((x; parames...) -> concept(x; vals = (f = y -> (z = copy(y); z[i] = parames[:val]; z); f(vals)), param...))
+                configurations = generate_configurations(concept_new, domains; params[i]...)
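+                # When the caller supplies no configurations, each per-val
+                # subproblem would regenerate its own configuration set here.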
+            end
+
+            icn.constants[:dom_size] = maximum(length, domains)
+            icn.constants[:numvars] = length(rand(configurations).x)
+
+            deep_icn = deepcopy(new_icn)
+
+            push!(p, optimize!(deep_icn, configurations, metric_function, optimizer_config; icn.constants..., params[i]...))
+        end
+        return p
+    else
+    =#
+    if isnothing(configurations)
+        configurations = generate_configurations(concept, domains; parameters...)
+    end
+
+    icn.constants[:dom_size] = maximum(length, domains)
+    icn.constants[:numvars] = length(domains)
+
+    return optimize!(
+        icn,
+        configurations,
+        metric_function,
+        optimizer_config;
+        icn.constants...,
+        parameters...
+    )
+    # end
+end
diff --git a/src/metrics.jl b/src/metrics.jl
index 542c6e8..b9b24f1 100644
--- a/src/metrics.jl
+++ b/src/metrics.jl
@@ -4,6 +4,18 @@ Compute the hamming distance of `x` over a collection of solutions `X`, i.e. the
 """
 hamming(x, X) = mapreduce(y -> Distances.hamming(x, y), min, X)
 
+function hamming(
+    icn::AbstractICN,
+    configurations::Configurations,
+    solution_vector;
+    parameters...
+)
+    sum(
+        x -> abs(evaluate(icn, x; parameters...) - hamming(x.x, solution_vector)),
+        configurations
+    )
+end
+
 """
     minkowski(x, X, p)
 """
 minkowski(x, X, p) = mapreduce(y -> Distances.minkowski(x, y, p), min, X)
@@ -14,6 +26,18 @@
 """
     manhattan(x, X)
 """
 manhattan(x, X) = mapreduce(y -> Distances.cityblock(x, y), min, X)
 
+function manhattan(
+    icn::AbstractICN,
+    configurations::Configurations,
+    solution_vector;
+    parameters...
+)
+    sum(
+        x -> abs(evaluate(icn, x; parameters...) - manhattan(x.x, solution_vector)),
+        configurations
+    ) / (get(icn.constants, :dom_size, 2) - 1)
+end
+
 """
     weights_bias(x)
 
 A metric that bias `x` towards operations with a lower bit. Do not affect the main metric.
diff --git a/src/optimizer.jl b/src/optimizer.jl
new file mode 100644
index 0000000..fe4334d
--- /dev/null
+++ b/src/optimizer.jl
@@ -0,0 +1,137 @@
+abstract type AbstractOptimizer end
+
+function optimize!(icn, configurations, metric_function, optimizer_config; parameters...)
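+    # Generic fallback, reached when no optimizer backend is loaded. Concrete
+    # methods are provided by package extensions (e.g., GeneticExt once
+    # Evolutionary.jl is available) and dispatch on the type of
+    # `optimizer_config`, e.g. (illustrative sketch):
+    #     optimize!(icn, configurations, hamming, GeneticOptimizer())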
+ error("No backend loaded") +end + +# SECTION - GeneticOptimizer Extension +struct GeneticOptimizer <: AbstractOptimizer + global_iter::Int + local_iter::Int + memoize::Bool + pop_size::Int + sampler::Union{Nothing, Function} +end + +@testitem "GeneticOptimizer" tags=[:extension] default_imports=false begin + import CompositionalNetworks: + Transformation, Arithmetic, Aggregation, Comparison, ICN, + SimpleFilter + import CompositionalNetworks: GeneticOptimizer, explore_learn + import ConstraintDomains: domain + import Evolutionary + import Test: @test + + test_icn=ICN(; + parameters = [:dom_size, :numvars, :val], + layers = [Transformation, Arithmetic, Aggregation, Comparison], + connection = [1, 2, 3, 4] + ) + + function allunique_val(x; val) + for i in 1:(length(x) - 1) + for j in (i + 1):length(x) + if x[i]==x[j] + if x[i]!=val + return false + end + end + end + end + return true + end + + function allunique_vals(x; vals) + for i in 1:(length(x) - 1) + for j in (i + 1):length(x) + if x[i]==x[j] + if !(x[i] in vals) + return false + end + end + end + end + return true + end + + @test explore_learn( + [domain([1, 2, 3, 4]) for i in 1:4], + allunique_val, + GeneticOptimizer(), + icn = test_icn, + val = 3 + )[2] + + new_test_icn=ICN(; + parameters = [:dom_size, :numvars, :vals], + layers = [SimpleFilter, Transformation, Arithmetic, Aggregation, Comparison], + connection = [1, 2, 3, 4, 5] + ) + + @test explore_learn( + [domain([1, 2, 3, 4]) for i in 1:4], + allunique_vals, + GeneticOptimizer(), + icn = new_test_icn, + vals = [3, 4] + )[2] +end + +# SECTION - CBLSOptimizer Extension +struct LocalSearchOptimizer <: AbstractOptimizer + options::Any +end + +#FIXME - Broken for compatibility reasons until LocalSearchSolvers updates its compat entries +# @testitem "LocalSearchOptimizer" tags = [:extension] default_imports = false begin +# import CompositionalNetworks: Transformation, Arithmetic, Aggregation, SimpleFilter +# import CompositionalNetworks: LocalSearchOptimizer, explore_learn, Comparison, ICN +# import ConstraintDomains: domain +# import LocalSearchSolvers +# import Test: @test + +# test_icn = ICN(; +# parameters=[:dom_size, :numvars, :val], +# layers=[Transformation, Arithmetic, Aggregation, Comparison], +# connection=[1, 2, 3, 4], +# ) + +# function allunique_val(x; val) +# for i in 1:(length(x)-1) +# for j in (i+1):length(x) +# if x[i] == x[j] +# if x[i] != val +# return false +# end +# end +# end +# end +# return true +# end + +# function allunique_vals(x; vals) +# for i in 1:(length(x)-1) +# for j in (i+1):length(x) +# if x[i] == x[j] +# if !(x[i] in vals) +# return false +# end +# end +# end +# end +# return true +# end + +# @test explore_learn([domain([1, 2, 3, 4]) for i in 1:4], allunique_val, LocalSearchOptimizer(), icn=test_icn, val=3)[2] + +# new_test_icn = ICN(; +# parameters=[:dom_size, :numvars, :vals], +# layers=[SimpleFilter, Transformation, Arithmetic, Aggregation, Comparison], +# connection=[1, 2, 3, 4, 5], +# ) + +# @test explore_learn([domain([1, 2, 3, 4]) for i in 1:4], allunique_vals, LocalSearchOptimizer(), icn=new_test_icn, vals=[3, 4])[2] +# end + +struct JuMPOptimizer <: AbstractOptimizer +end diff --git a/test/Aqua.jl b/test/Aqua.jl index 4a12a23..a9ecc31 100644 --- a/test/Aqua.jl +++ b/test/Aqua.jl @@ -2,17 +2,7 @@ import Aqua import CompositionalNetworks - # TODO: Fix the broken tests and remove the `broken = true` flag - Aqua.test_all( - CompositionalNetworks; - ambiguities = (broken = true,), - deps_compat = false, - piracies = (broken = false,), - ) - 
- @testset "Ambiguities: CompositionalNetworks" begin - # Aqua.test_ambiguities(CompositionalNetworks;) - end + Aqua.test_all(CompositionalNetworks; deps_compat = false) @testset "Piracies: CompositionalNetworks" begin Aqua.test_piracies(CompositionalNetworks;) @@ -21,7 +11,7 @@ @testset "Dependencies compatibility (no extras)" begin Aqua.test_deps_compat( CompositionalNetworks; - check_extras = false, # ignore = [:Random] + check_extras = false # ignore = [:Random] ) end end diff --git a/test/genetic.jl b/test/genetic.jl deleted file mode 100644 index 2b1f22c..0000000 --- a/test/genetic.jl +++ /dev/null @@ -1,144 +0,0 @@ -""" - generate_population(icn, pop_size -Generate a pôpulation of weights (individuals) for the genetic algorithm weighting `icn`. -""" -function generate_population(icn, pop_size) - population = Vector{BitVector}() - foreach(_ -> push!(population, falses(nbits(icn))), 1:pop_size) - return population -end - -""" - _optimize!(icn, X, X_sols; metric = hamming, pop_size = 200) -Optimize and set the weights of an ICN with a given set of configuration `X` and solutions `X_sols`. -""" -function _optimize!( - icn, - solutions, - non_sltns, - dom_size, - metric, - pop_size, - iterations; - samples = nothing, - memoize = false, - parameters..., -) - inplace = zeros(dom_size, max_icn_length()) - _non_sltns = isnothing(samples) ? non_sltns : rand(non_sltns, samples) - - function fitness(w) - compo = compose(icn, w) - f = composition(compo) - S = Iterators.flatten((solutions, _non_sltns)) - σ = sum( - x -> abs(f(x; X = inplace, dom_size, parameters...) - metric(x, solutions)), - S, - ) - return σ + regularization(icn) + weights_bias(w) - end - - _fitness = memoize ? (@memoize Dict memoize_fitness(w) = fitness(w)) : fitness - - _icn_ga = GA(; - populationSize = pop_size, - crossoverRate = 0.8, - epsilon = 0.05, - selection = tournament(2), - crossover = SPX, - mutation = flip, - mutationRate = 1.0, - ) - - pop = generate_population(icn, pop_size) - r = Evolutionary.optimize(_fitness, pop, _icn_ga, Evolutionary.Options(; iterations)) - return weights!(icn, Evolutionary.minimizer(r)) -end - -""" - optimize!(icn, X, X_sols, global_iter, local_iter; metric=hamming, popSize=100) -Optimize and set the weights of an ICN with a given set of configuration `X` and solutions `X_sols`. The best weights among `global_iter` will be set. -""" -function optimize!( - icn, - solutions, - non_sltns, - global_iter, - iter, - dom_size, - metric, - pop_size; - sampler = nothing, - memoize = false, - parameters..., -) - results = Dictionary{BitVector,Int}() - aux_results = Vector{BitVector}(undef, global_iter) - nt = Base.Threads.nthreads() - - @info """Starting optimization of weights$(nt > 1 ? " (multithreaded)" : "")""" - samples = isnothing(sampler) ? 
-        nothing : sampler(length(solutions) + length(non_sltns))
-    @qthreads for i = 1:global_iter
-        @info "Iteration $i"
-        aux_icn = deepcopy(icn)
-        _optimize!(
-            aux_icn,
-            solutions,
-            non_sltns,
-            dom_size,
-            eval(metric),
-            pop_size,
-            iter;
-            samples,
-            memoize,
-            parameters...,
-        )
-        aux_results[i] = weights(aux_icn)
-    end
-    foreach(bv -> incsert!(results, bv), aux_results)
-    best = rand(findall(x -> x == maximum(results), results))
-    weights!(icn, best)
-    return best, results
-end
-
-struct GeneticOptimizer <: AbstractOptimizer
-    global_iter::Int
-    local_iter::Int
-    memoize::Bool
-    pop_size::Int
-    sampler::Union{Nothing,Function}
-end
-
-function GeneticOptimizer(;
-    global_iter = Threads.nthreads(),
-    local_iter = 64,
-    memoize = false,
-    pop_size = 64,
-    sampler = nothing,
-)
-    return GeneticOptimizer(global_iter, local_iter, memoize, pop_size, sampler)
-end
-
-function CompositionalNetworks.optimize!(
-    icn,
-    solutions,
-    non_sltns,
-    dom_size,
-    metric,
-    optimizer::GeneticOptimizer;
-    parameters...,
-)
-    return optimize!(
-        icn,
-        solutions,
-        non_sltns,
-        optimizer.global_iter,
-        optimizer.local_iter,
-        dom_size,
-        metric,
-        optimizer.pop_size;
-        optimizer.sampler,
-        optimizer.memoize,
-        parameters...,
-    )
-end
diff --git a/test/icn.jl b/test/icn.jl
deleted file mode 100644
index a821f53..0000000
--- a/test/icn.jl
+++ /dev/null
@@ -1,45 +0,0 @@
-@testset "ICNs" begin
-    using CompositionalNetworks
-    using ConstraintDomains
-    using Dictionaries
-    using Evolutionary
-    using Memoization
-    using Test
-    using ThreadPools
-
-    import CompositionalNetworks: AbstractOptimizer
-
-    include("genetic.jl")
-
-    # # Test with manually weighted ICN
-    icn = ICN(param = [:val])
-    @test max_icn_length() == 18
-    show_layers(icn)
-
-    icn.weights = vcat(trues(18), falses(6))
-    @test CompositionalNetworks.is_viable(icn)
-    @test length(icn) == 31
-
-    compo = compose(icn)
-    @test code(compo; name = "test_composition") ==
-          "test_composition = identity ∘ sum ∘ sum ∘ [val_minus_var, var_minus_val" *
-          ", count_bounding_val, count_g_val, count_l_val, count_eq_val," *
-          " contiguous_vars_minus_rev, contiguous_vars_minus, count_l_right, count_g_right" *
-          ", count_l_left, count_g_left, count_lesser, count_greater, count_eq_right, " *
-          "count_eq_left, count_eq, identity]"
-
-    v = [1, 2, 4, 3]
-    @test composition(compo)(v; val = 2, dom_size = 4) == 67
-
-    CompositionalNetworks.generate_weights(icn)
-
-    ## Test GA and exploration
-    domains = [domain([1, 2, 3, 4]) for i = 1:4]
-    compo, _ = explore_learn_compose(domains, allunique; optimizer = GeneticOptimizer())
-    @test composition(compo)([1, 2, 3, 3], dom_size = 4) > 0.0
-
-    ## Test export to file
-    composition_to_file!(compo, "test_dummy.jl", "all_different")
-    rm("test_dummy.jl"; force = true)
-
-end
diff --git a/test/runtests.jl b/test/runtests.jl
index c3ed1ee..0acff18 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -11,6 +11,4 @@ using TestItemRunner
     include("ExplicitImports.jl")
     include("JET.jl")
     include("TestItemRunner.jl")
-
-    include("icn.jl")
 end