
Threading

Natural ordering

The most classical way to implement a Gauss-Seidel iteration is to update the nodes following their numbering, in increasing order.
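Assuming the model problem is the one-dimensional Poisson equation u''(x) = f(x), discretized with second-order central differences on a uniform grid of spacing dx, the Gauss-Seidel update of an interior node i reads

u[i] = 0.5*(u[i-1] + u[i+1] - dx*dx*f[i])

and a natural-ordering sweep simply applies this formula for i = 1, 2, ..., n-2, reusing the freshly updated value of u[i-1].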

To keep the implementation simple, let's put a few common pieces in a module, gssupp.py, shown below.

import logging
import numpy       as     np
from   math        import pi

__format = "%(asctime)s: %(message)s"
logging.basicConfig(format=__format, level=logging.INFO, datefmt="%H:%M:%S")

class Problem:
    def __init__(self, u, f, hist=None):
        self.u = u
        self.f = f
        self.hist = hist

def info(*args, **kwargs):
    """Directly logs info calls, 99% of what we're doing."""
    logging.info(*args, **kwargs)

def zero_rhs(x, L):
    """Sets up a null vector for the RHS of the equation"""
    return np.zeros_like(x)

def sin_rhs(x, L, A):
    """Sets up a rhs that produces a single period of a senoidal function with
       amplitude A as the solution."""
    f = 2*pi/L
    return -f*f*A*np.sin(f*x)

def cub_rhs(x, L, A):
    """Sets up rhs to have a cubic solution with zeros on 0, L/2, L/3
       and value A on L.
       Don't forget to set up the correct boundary conditions."""
    return - 9*(A*L - 3*A*x)/(L*L*L)
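As a sanity check, a small hypothetical helper (not part of gssupp.py) can evaluate the closed-form solutions these right-hand sides should reproduce, assuming u'' = f with u(0) = 0 and u(L) = A for the linear and cubic cases, and homogeneous boundary conditions for the sinusoidal one:

import numpy as np
from math import pi

def exact_solutions(x, L, A):
    """Closed-form solutions of u'' = f for the three right-hand sides above,
       assuming u(0) = 0, u(L) = A for the linear and cubic cases and
       u(0) = u(L) = 0 for the sinusoidal case."""
    u_lin = A*x/L                                    # zero_rhs
    u_sin = A*np.sin(2*pi*x/L)                       # sin_rhs
    u_cub = A*x*(4.5*x*x - 4.5*L*x + L*L)/(L*L*L)    # cub_rhs: zeros at 0, L/3 and 2L/3
    return u_lin, u_sin, u_cub

Comparing the converged iterates against these curves is an easy way to check the solvers below.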

A classic implementation, with no particular concern for elegance, follows below. This is basically Fortran 77 written in Python. We will improve it progressively, after discussing some facets of this implementation.

import numpy  as np
import gssupp as gs
import matplotlib.pyplot as plt

gs.info("Setting up problems.")
A = 4.0
L = 3.0

nn = 21
maxiter = 1000
tol = 5e-4

x = np.linspace(0, L, nn)
dx = x[1] - x[0]
dx2 = dx*dx
ul = np.zeros_like(x)
us = np.zeros_like(x)
uc = np.zeros_like(x)

# Linear solution
fl = np.zeros_like(x)
ul[-1] = A

# Sinusoidal solution
fs = gs.sin_rhs(x, L, A)

# Cubic solution
fc = gs.cub_rhs(x, L, A)
uc[-1] = A

probs = { 'linear': gs.Problem(u=ul, f=fl),
      'sinusoidal': gs.Problem(u=us, f=fs),
           'cubic': gs.Problem(u=uc, f=fc)}

## Run classical GS iteration with lexicographic ordering
for name, p in probs.items():
    gs.info("Solving {} problem.".format(name))
    err = np.zeros(maxiter, dtype=np.float64)
    converged = False
    for it in range(maxiter):
        uold = p.u.copy()
        for i in range(1, p.u.size-1):
            p.u[i] = 0.5*(p.u[i-1] + p.u[i+1] - dx2*p.f[i])  
        err[it] = np.linalg.norm(uold - p.u)/np.linalg.norm(p.u)
        if err[it] <= tol:
            converged = True
            break
    err = np.resize(err, it+1)
    p.hist = err

fig = 0
for name, p in probs.items():
    plt.figure(fig)
    plt.title("{} problem solution".format(name))
    plt.plot(x, p.u)
    fig += 1
    plt.figure(fig)
    plt.title("{} problem convergence history".format(name))
    plt.yscale('log')
    plt.plot(p.hist)
    fig += 1

plt.show()     # display the figures when run as a plain script

Reverse and symmetric ordering

Just to show that the order in which the variables are updated influences the convergence rate, let's redo this program including an update in reverse order, from right to left, and a symmetric update, from left to right and then from right to left.

To do this, we create a class whose objects are ranges that switch between increasing and decreasing order each time they are iterated.

"""
gssup02.py

This just organizes a few support functions for the GS iteration.
Includes setting up boundary conditions in the 'Problem' class, and
introduces a symmetric range class.
"""

import logging
import numpy       as     np
from   math        import pi
from   itertools   import cycle

__format = "%(asctime)s: %(message)s"
logging.basicConfig(format=__format, level=logging.INFO, datefmt="%H:%M:%S")

class Problem:
    def __init__(self, x, f, U0=None, UL=None, hist=None):
        self.x = x
        self.u = np.zeros_like(x)
        if U0 is not None: self.u[0] = U0 
        if UL is not None: self.u[-1] = UL 
        self.f = f
        self.hist = hist

class SymmRange:
    """
    A range that goes forward and backward.
    This really is a bit of overengineering, and it's tailored
    to the symmetric GS iteration.
    Also, never tested with step different from 1, so beware.
    """
    def __init__(self, start=0, stop=0, step=1):
        self.ranges = cycle([range(start, stop, step),
                             range(stop-1, start-1, -step)])
        self.current_range = iter(next(self.ranges))

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.current_range)
        except StopIteration:
            self.current_range = iter(next(self.ranges))
            raise StopIteration

def info(*args, **kwargs):
    """Directly logs info calls, 99% of what we're doing."""
    logging.info(*args, **kwargs)

def zero_rhs(x, L):
    """Sets up a null vector for the RHS of the equation"""
    return np.zeros_like(x)

def sin_rhs(x, L, A):
    """Sets up a rhs that produces a single period of a senoidal function with
       amplitude A as the solution."""
    f = 2*pi/L
    return -f*f*A*np.sin(f*x)

def cub_rhs(x, L, A):
    """Sets up rhs to have a cubic solution with zeros on 0, L/2, L/3
       and value A on L.
       Don't forget to set up the correct boundary conditions."""
    return -9*(A*L - 3*A*x)/(L*L*L)
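The SymmRange class alternates direction each time it is exhausted, which is exactly what the symmetric sweep needs. A quick illustration (this snippet is just a hypothetical check, assuming gssup02.py is importable):

import gssup02 as gs

r = gs.SymmRange(1, 5)
print(list(r))   # first pass sweeps forward:   [1, 2, 3, 4]
print(list(r))   # second pass sweeps backward: [4, 3, 2, 1]
print(list(r))   # third pass sweeps forward again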

The implementation with the varying iteration orders is shown below.

"""
Comparison of convergence rates between forward, backward and
symmetric GS iterations
"""

import numpy  as np
import gssup02 as gs
import matplotlib.pyplot as plt

gs.info("Setting up problems.")
A = 4.0
L = 3.0

nn = 21
maxiter = 1000
tol = 5e-4

x = np.linspace(0, L, nn)
dx = x[1] - x[0]
dx2 = dx*dx

fl = np.zeros_like(x)            # Linear solution
fs = gs.sin_rhs(x, L, A)         # Sinusoidal solution
fc = gs.cub_rhs(x, L, A)         # Cubic solution

probs_fwd = { 'linear': gs.Problem(x, fl, UL=A),
          'sinusoidal': gs.Problem(x, fs),
               'cubic': gs.Problem(x, fc, UL=A)}

probs_bwd = { 'linear': gs.Problem(x, fl, UL=A),
          'sinusoidal': gs.Problem(x, fs),
               'cubic': gs.Problem(x, fc, UL=A)}

probs_sym = { 'linear': gs.Problem(x, fl, UL=A),
          'sinusoidal': gs.Problem(x, fs),
               'cubic': gs.Problem(x, fc, UL=A)}

## Run GS iterations
symrange = gs.SymmRange(1, x.size-1)
orders = { 'forward': (probs_fwd, range(1, x.size-1)),
          'backward': (probs_bwd, range(x.size-2,0,-1)),
         'symmetric': (probs_sym, symrange)}

for order, (prob, prob_range) in orders.items():
    gs.info("Running {} ordering.".format(order))
    for name, p in prob.items():
        gs.info("Solving {} problem.".format(name))
        err = np.zeros(maxiter, dtype=np.float64)
        converged = False
        for it in range(maxiter):
            uold = p.u.copy()
            for i in prob_range:
                p.u[i] = 0.5*(p.u[i-1] + p.u[i+1] - dx2*p.f[i])  
            err[it] = np.linalg.norm(uold - p.u)/np.linalg.norm(p.u)
            if err[it] <= tol:
                converged = True
                break
        err = np.resize(err, it+1)
        p.hist = err

figs = { 'linear': (0,3),
     'sinusoidal': (1,4),
          'cubic': (2,5)}

styles = {'forward': 'r-',
         'backward': 'b:',
        'symmetric': 'g-.'}

# Format graphs
for name in probs_fwd.keys():
    plt.figure(figs[name][0])
    plt.title("Solution for problem {}".format(name))
    plt.figure(figs[name][1])
    plt.yscale('log')
    plt.title("{} problem convergence history".format(name))

# Plot results
for order, (prob, _) in orders.items():
    for name, p in prob.items():
        plt.figure(figs[name][0])
        plt.plot(x, p.u, styles[order])     
        plt.figure(figs[name][1])
        plt.plot(p.hist, styles[order])

plt.show()     # display the figures when run as a plain script