Motivation

I want to incorporate a metaheuristic method into an application I am developing.

What is Particle Swarm Optimization?

Particle Swarm Optimization (hereafter PSO) is a type of swarm intelligence: by modeling the behavior of bird flocks and fish schools, it efficiently searches the neighborhood of the optimum of a given objective function. I say "neighborhood" because the solution it finds is not guaranteed to be the true optimum, but a value close to it can be computed in a practical amount of time. Similar methods include the Genetic algorithm (GA) and Tabu search (TS), but PSO is far easier to implement than either.

PSO Model Equations

For the PSO model equations, see 群れの知能 : Particle Swarm Optimization(<特集>最適化問題へのアプローチ).
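
For reference, the code below implements the commonly used gbest formulation (a sketch, not taken from the article above; note that the inertia weight w found in many variants is omitted here):

  v_i ← v_i + c1 * r1 * (pbest_i - x_i) + c2 * r2 * (gbest - x_i)
  x_i ← x_i + v_i

where x_i and v_i are the position and velocity of particle i, pbest_i is that particle's best position found so far, gbest is the best position found by the whole swarm, r1 and r2 are uniform random numbers in [0, 1), and c1 and c2 are acceleration coefficients.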

Objective Function and Variables

We solve the optimization problem defined by the following objective function, variables, and constraints.

  • Objective function : f(x,y) = x^2 + y^2
  • Variables : x, y
  • Constraints : -10 ≤ x ≤ 10, -10 ≤ y ≤ 10

Even without resorting to a metaheuristic, it is obvious that the optimal value of this function is 0 (at x = y = 0), so the solution found by PSO should lie in that neighborhood.
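
As a quick sanity check (a minimal sketch; f below mirrors the vector-based objective_function defined in the implementation that follows, with positions represented as [x, y] arrays):

def f(vector)
  vector.inject(0) { |sum, val| sum + val ** 2 }
end

puts f([0, 0]) # => 0, the known optimum
puts f([3, 4]) # => 25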

PSO Implementation

def objective_function(vector)
  # Compute the right-hand side of f(x,y) = x^2 + y^2
  return vector.inject(0) {|sum, val| sum + val ** 2}
end

# Generate a random vector whose components lie within the given min/max ranges
# (position ranges use the :min_x/:min_y keys, velocity ranges use :min_x_vel/:min_y_vel)
def get_random_vector(minmax_array)
  return Array.new(minmax_array.size) do |i|
    if minmax_array[i].has_key?(:min_x) or minmax_array[i].has_key?(:min_y)
      if i == 0
        minmax_array[i][:min_x] + ((minmax_array[i][:max_x] - minmax_array[i][:min_x]) * rand())
      else
        minmax_array[i][:min_y] + ((minmax_array[i][:max_y] - minmax_array[i][:min_y]) * rand())
      end
    else #for velocity vector
      if i == 0
        minmax_array[i][:min_x_vel] + ((minmax_array[i][:max_x_vel] - minmax_array[i][:min_x_vel]) * rand())
      else
        minmax_array[i][:min_y_vel] + ((minmax_array[i][:max_y_vel] - minmax_array[i][:min_y_vel]) * rand())
      end
    end
  end
end

# Create a particle with a random position and velocity,
# initializing its personal best to the starting point
def create_particle(search_space, vel_space)
  particle = {}
  particle[:position] = get_random_vector(search_space)
  particle[:cost] = objective_function(particle[:position])
  particle[:best_position] = Array.new(particle[:position])
  particle[:best_cost] = particle[:cost]
  particle[:velocity] = get_random_vector(vel_space)
  return particle
end

# Pull the particle toward its personal best and the global best,
# clamping each velocity component to [-max_v, max_v]
def update_velocity(particle, global_best, max_v, c1, c2)
  particle[:velocity].each_with_index do |v,i|
    v1 = c1 * rand() * (particle[:best_position][i] - particle[:position][i])
    v2 = c2 * rand() * (global_best[:position][i] - particle[:position][i])
    particle[:velocity][i] = v + v1 + v2
    # Clamp the velocity to the maximum magnitude if it violates the max_v constraint
    particle[:velocity][i] = max_v if particle[:velocity][i] > max_v
    particle[:velocity][i] = -max_v if particle[:velocity][i] < -max_v
  end
end

# Move the particle by its velocity, reflecting off the search space
# boundaries and reversing the corresponding velocity component on a bounce
def update_position(particle, search_space)
  particle[:position].each_with_index do |v, i|
    particle[:position][i] = v + particle[:velocity][i]
    if i == 0 # x coordinate
      if particle[:position][i] > search_space[i][:max_x]
        particle[:position][i] = search_space[i][:max_x] - (particle[:position][i] - search_space[i][:max_x]).abs
        particle[:velocity][i] *= -1.0
      elsif particle[:position][i] < search_space[i][:min_x]
        particle[:position][i] = search_space[i][:min_x] + (particle[:position][i] - search_space[i][:min_x]).abs
        particle[:velocity][i] *= -1.0
      end
    else # y coordinate
      if particle[:position][i] > search_space[i][:max_y]
        particle[:position][i] = search_space[i][:max_y] - (particle[:position][i] - search_space[i][:max_y]).abs
        particle[:velocity][i] *= -1.0
      elsif particle[:position][i] < search_space[i][:min_y]
        particle[:position][i] = search_space[i][:min_y] + (particle[:position][i] - search_space[i][:min_y]).abs
        particle[:velocity][i] *= -1.0
      end
    end
  end
end

# Return the best solution found so far across the whole population
def get_global_best(population_array, current_best = nil)
  # Sort the population by cost in ascending order
  population_array.sort! { |a, b| a[:cost] <=> b[:cost] }
  best = population_array.first

  if current_best.nil? or best[:cost] <= current_best[:cost]
    current_best = {}
    current_best[:position] = Array.new(best[:position])
    current_best[:cost] = best[:cost]
  end
  return current_best
end

# Update the particle's personal best whenever its current cost is no worse
def update_best_position(particle)
  return if particle[:cost] > particle[:best_cost]
  particle[:best_cost] = particle[:cost]
  particle[:best_position] = Array.new(particle[:position])
end

# Run the gbest PSO search and return the best solution found
def search_gbest(search_space, vel_space, num_population, max_gens, max_vel, c1, c2)
  # Initialize the particles and the global best solution
  population_array = Array.new(num_population) {create_particle(search_space, vel_space)}
  global_best = get_global_best(population_array)

  # Main iteration loop
  max_gens.times do |gen|
    population_array.each do |particle|
      update_velocity(particle, global_best, max_vel, c1, c2)
      update_position(particle, search_space)
      particle[:cost] = objective_function(particle[:position])
      update_best_position(particle)
    end
    global_best = get_global_best(population_array, global_best)
    puts "> gen #{gen + 1}, fitness=#{global_best[:cost]}"
  end
  return global_best
end

if __FILE__ == $0
  # Problem variables and constraints
  ## Variable constraints
  min_x, max_x = -10, 10
  min_y, max_y = -10, 10
  min_x_vel, max_x_vel = -1, 1
  min_y_vel, max_y_vel = -1, 1
  ## Search over a two-dimensional space
  search_space = [{min_x: min_x, max_x: max_x}, {min_y: min_y, max_y: max_y}]

  # Algorithm parameter settings
  vel_space = [{min_x_vel: min_x_vel, max_x_vel: max_x_vel}, {min_y_vel: min_y_vel, max_y_vel: max_y_vel}]

  ## Number of particles (swarm size)
  num_population = 100
  ## Maximum number of iterations
  max_gens = 30
  ## Maximum velocity
  max_vel = 100.0
  ## Acceleration (weight) coefficients
  c1, c2 = 2.0, 2.0

  optimum_answer = search_gbest(search_space, vel_space, num_population, max_gens, max_vel, c1, c2)

  puts "Optimization completed! Solution: f(x,y) = #{optimum_answer[:cost]}, x,y = #{optimum_answer[:position]}"

end

Execution Results

➤  ruby pso.rb
> gen 1, fitness=0.15066388242429396
> gen 2, fitness=0.15066388242429396
> gen 3, fitness=0.1299080097044353
> gen 4, fitness=0.1299080097044353
> gen 5, fitness=0.1299080097044353
> gen 6, fitness=0.061630657186562014
> gen 7, fitness=0.05162623875925075
> gen 8, fitness=0.05162623875925075
> gen 9, fitness=0.05162623875925075
> gen 10, fitness=0.05162623875925075
> gen 11, fitness=0.05162623875925075
> gen 12, fitness=0.05162623875925075
> gen 13, fitness=0.05162623875925075
> gen 14, fitness=0.021708159560295315
> gen 15, fitness=0.021708159560295315
> gen 16, fitness=0.021708159560295315
> gen 17, fitness=0.021708159560295315
> gen 18, fitness=0.021708159560295315
> gen 19, fitness=0.0005691661191247639
> gen 20, fitness=0.0005691661191247639
> gen 21, fitness=0.0005691661191247639
> gen 22, fitness=0.0005691661191247639
> gen 23, fitness=0.0005691661191247639
> gen 24, fitness=0.0005691661191247639
> gen 25, fitness=0.0005691661191247639
> gen 26, fitness=0.0005691661191247639
> gen 27, fitness=0.0005691661191247639
> gen 28, fitness=0.0005691661191247639
> gen 29, fitness=0.0005691661191247639
> gen 30, fitness=0.0005691661191247639
Optimization completed! Solution: f(x,y) = 0.0005691661191247639, x,y = [-0.011276117027324783, -0.021024160004881054]

Thoughts

The results show that PSO can compute a solution in the neighborhood of the objective function's optimum. To get even closer to the optimum, or to reach it more efficiently, the PSO model equations need some refinement. PSO has been studied extensively, so when necessary you can consult that research and adopt an algorithm that better fits your target problem.
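
For example, a widely used refinement adds an inertia weight w to the velocity update so that the carried-over velocity is gradually damped. A minimal sketch of how the update_velocity function above could be extended (the name update_velocity_with_inertia and the typical w range of roughly 0.4 to 0.9 are illustrative assumptions, not part of the original code):

# Velocity update with an inertia weight w, a common PSO variant
def update_velocity_with_inertia(particle, global_best, max_v, c1, c2, w)
  particle[:velocity].each_with_index do |v, i|
    v1 = c1 * rand() * (particle[:best_position][i] - particle[:position][i])
    v2 = c2 * rand() * (global_best[:position][i] - particle[:position][i])
    # w < 1 damps the previous velocity, which tends to improve convergence
    particle[:velocity][i] = w * v + v1 + v2
    particle[:velocity][i] = max_v if particle[:velocity][i] > max_v
    particle[:velocity][i] = -max_v if particle[:velocity][i] < -max_v
  end
end

Calling this in place of update_velocity inside search_gbest, with w decreased gradually from about 0.9 to 0.4 over the iterations, is one configuration commonly reported in the PSO literature.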

References

PSOの最適化問題への適用事例