#!/usr/bin/perl
# This chunk of stuff was generated by App::FatPacker. To find the original
# file's code, look for the end of this BEGIN block or the string 'FATPACK'
BEGIN {
my %fatpacked;

$fatpacked{"PgToolkit/Class.pm"} = <<'PGTOOLKIT_CLASS';
  package PgToolkit::Class;
  
  use strict;
  use warnings;
  
  =head1 NAME
  
  B<PgToolkit::Class> - a base class.
  
  =head1 SYNOPSIS
  
  	package Foo;
  
  	use base qw(PgToolkit::Class);
  
  	sub init {
  		# some initialization
  	}
  
  	# some methods
  
  	1;
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Class> is a base class encapsulating the instantiation
  automation stuff.
  
  =head1 METHODS
  
  =head2 B<new()>
  
  A constructor. It can be called both on the class and on the object.
  
  =head3 Arguments
  
  An arbitrary number of arguments which will be later passed to the
  C<init()> method.
  
  =head3 Returns
  
  A new instance of the class that the method is being called on.
  
  =cut
  
  # Generic constructor. Works both as a class method and as an
  # instance method (an instance produces a new object of its class).
  # All arguments are forwarded untouched to init().
  sub new {
  	my $class = shift;

  	# Allow calling new() on an existing object as well as on a class
  	my $self = bless({}, ref($class) || $class);

  	# Hand the remaining constructor arguments over to init()
  	$self->init(@_);

  	return $self;
  }
  
  =head2 B<init()>
  
  This method is called after an object has been instantiated using the
  C<new()> method, all parameters that have been passed to the
  constructor are passed to this method also.
  
  =cut
  
  # No-op placeholder; subclasses override this hook to perform their
  # own initialization with the arguments passed to new().
  sub init {
  	# The init method stub
  }
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_CLASS

$fatpacked{"PgToolkit/Compactor.pm"} = <<'PGTOOLKIT_COMPACTOR';
  package PgToolkit::Compactor;
  
  use base qw(PgToolkit::Class);
  
  use strict;
  use warnings;
  
  =head1 NAME
  
  B<PgToolkit::Compactor> - a base compactor.
  
  =head1 SYNOPSIS
  
  	package PgToolkit::CompactorStub;
  
  	use base qw(PgToolkit::Compactor);
  
  	sub _init {
  		my $self = shift;
  
  		$self->{'_log_target'} = 'some_target';
  		# some other initialization
  	}
  
  	sub _process {
  		# some process implementation
  	}
  
  	1;
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Compactor> is a base class for bloat reducing mechanism
  implementations. You can override _init() and must implement
  _process(). The _logger property is available in these methods. If
  you want log entries to have a target, define the _log_target
  property.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<logger>
  
  a logger object
  
  =item C<dry_run>
  
  =back
  
  =cut
  
  # Store the settings common to all compactors and run the subclass
  # _init() hook, guarding it with _wrap() so a database error is
  # logged and terminates the program.
  #
  # Arguments: logger - a logger object, dry_run - a dry run flag.
  sub init {
  	my ($self, %arg_hash) = @_;

  	# Keep the shared settings on the instance
  	@{$self}{qw(_logger _dry_run)} = @arg_hash{qw(logger dry_run)};

  	my $init_hook = sub {
  		$self->_init(%arg_hash);
  	};
  	$self->_wrap(code => $init_hook);

  	return;
  }
  
  =head1 METHODS
  
  =head2 B<process()>
  
  Runs a bloat reducing process.
  
  =head3 Arguments
  
  =over 4
  
  =item C<attempt>
  
  an attempt number of processing.
  
  =back
  
  =cut
  
  # Run the bloat reducing routine implemented by the subclass inside
  # the error-handling wrapper.
  #
  # Arguments: attempt - an attempt number of processing.
  sub process {
  	my ($self, %arg_hash) = @_;

  	my $process_hook = sub {
  		$self->_process(%arg_hash);
  	};
  	$self->_wrap(code => $process_hook);

  	return;
  }
  
  # Intentionally empty; subclasses override this hook to set up their
  # own state from the constructor arguments.
  sub _init {
  	# initialization stub
  }
  
  # Abstract processing hook; concrete compactors must override it.
  sub _process {
  	die('NotImplementedError');
  }
  
  # Terminate the whole process with a non-zero (failure) status.
  sub _exit {
  	exit(1);
  }
  
  # Run the supplied code reference, trapping exceptions. A database
  # error is logged and terminates the program via _exit(); any other
  # exception is re-thrown to the caller.
  #
  # Arguments: code - a code reference to execute.
  sub _wrap {
  	my ($self, %arg_hash) = @_;

  	eval {
  		$arg_hash{'code'}->();
  	};
  	if ($@) {
  		# Copy $@ right away: it is a global that any later eval (for
  		# instance inside the logger) could clobber.
  		my $error = $@;
  		if ($error =~ /DatabaseError/) {
  			$self->{'_logger'}->write(
  				message => 'A database error occurred, exiting:'."\n".$error,
  				level => 'error',
  				# Pass a target only when one has been defined
  				(defined $self->{'_log_target'} ?
  				 (target => $self->{'_log_target'}) : ()));
  			$self->_exit();
  		} else {
  			die($error);
  		}
  	}

  	return;
  }
  
  # Run an SQL statement on the compactor's database and log the
  # statement together with its execution duration.
  #
  # Arguments: sql - the statement text, level - an optional log level
  # (defaults to 'debug0'). Returns the database result set.
  sub _execute_and_log {
  	my ($self, %arg_hash) = @_;

  	my $result = $self->{'_database'}->execute(sql => $arg_hash{'sql'});

  	# Duration of the statement just executed, seconds with ms precision
  	my $duration = sprintf("%.3f", $self->{'_database'}->get_duration());

  	$self->{'_logger'}->write(
  		message => ('Executed SQL: duration '.$duration.'s, statement: '.
  					"\n".$arg_hash{'sql'}),
  		level => (defined $arg_hash{'level'} ? $arg_hash{'level'} : 'debug0'),
  		target => $self->{'_log_target'});

  	return $result;
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<PgToolkit::Class>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_COMPACTOR

$fatpacked{"PgToolkit/Compactor/Cluster.pm"} = <<'PGTOOLKIT_COMPACTOR_CLUSTER';
  package PgToolkit::Compactor::Cluster;
  
  use base qw(PgToolkit::Compactor);
  
  use strict;
  use warnings;
  
  use PgToolkit::Utils;
  
  =head1 NAME
  
  B<PgToolkit::Compactor::Cluster> - a cluster level processing for bloat
  reducing.
  
  =head1 SYNOPSIS
  
  	my $cluster_compactor = PgToolkit::Compactor::Cluster->new(
  		database_constructor => $database_constructor,
  		logger => $logger,
  		dry_run => 0,
  		database_compactor_constructor => $database_compactor_constructor,
  		dbname_list => $dbname_list,
  		excluded_dbname_list => $excluded_dbname_list,
  		max_retry_count => 10);
  
  	$cluster_compactor->process();
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Compactor::Cluster> class is an implementation of a cluster
  level processing logic for bloat reducing mechanism.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<database_constructor>
  
  a database constructor code reference
  
  =item C<logger>
  
  a logger object
  
  =item C<dry_run>
  
  =item C<database_compactor_constructor>
  
  a database compactor constructor code reference
  
  =item C<dbname_list>
  
  a list of database names to process
  
  =item C<excluded_dbname_list>
  
  a list of database names to exclude from processing
  
  =item C<max_retry_count>
  
  a maximum amount of attempts to compact cluster.
  
  =back
  
  =cut
  
  # Set up the cluster compactor: connect to the maintenance database
  # ('postgres'), resolve the list of databases to process and create a
  # database compactor for each of them.
  sub _init {
  	my ($self, %arg_hash) = @_;

  	$self->{'_database_constructor'} = $arg_hash{'database_constructor'};
  	$self->{'_max_retry_count'} = $arg_hash{'max_retry_count'};

  	# The service connection used for the cluster-wide catalog query
  	$self->{'_database'} =
  		$self->{'_database_constructor'}->(dbname => 'postgres');

  	$self->{'_logger'}->write(
  		message => ('Database connection method: '.
  					$self->{'_database'}->get_adapter_name().'.'),
  		level => 'info');

  	# Apply the include/exclude filters against pg_database
  	my $dbname_list = $self->_get_dbname_list(
  		dbname_list => $arg_hash{'dbname_list'},
  		excluded_dbname_list => $arg_hash{'excluded_dbname_list'});

  	# One database compactor per database, each with its own connection
  	$self->{'_database_compactor_list'} = [];
  	for my $dbname (@{$dbname_list}) {
  		my $database_compactor =
  			$arg_hash{'database_compactor_constructor'}->(
  				database => $self->{'_database_constructor'}->(
  					dbname => $dbname));
  		push(@{$self->{'_database_compactor_list'}}, $database_compactor);
  	}

  	return;
  }
  
  # Process every database in the cluster, retrying the not yet fully
  # processed ones up to _max_retry_count times, then (unless in dry
  # run mode) log a summary report of the space reclaimed.
  sub _process {
  	my $self = shift;

  	if (@{$self->{'_database_compactor_list'}}) {
  		my $attempt = 0;
  		while (not $self->is_processed() and
  			   $attempt <= $self->{'_max_retry_count'})
  		{
  			if ($attempt != 0) {
  				$self->{'_logger'}->write(
  					message => ('Retrying to process, attempt: '.$attempt.
  								' from '.$self->{'_max_retry_count'}.', '.
  								$self->_incomplete_count().' databases left.'),
  					level => 'notice');
  			}

  			# Give every unfinished database another pass
  			for my $database_compactor (@{$self->{'_database_compactor_list'}})
  			{
  				if (not $database_compactor->is_processed()) {
  					$database_compactor->process(attempt => $attempt);
  				}
  			}

  			$attempt++;
  		}

  		# Per-database "delta (total delta) dbname" fragments for the
  		# final report
  		my $databases_size_delta_report = join(
  			', ',
  			map(
  				PgToolkit::Utils->get_size_pretty(size => $_->get_size_delta()).
  				' ('.PgToolkit::Utils->get_size_pretty(
  					size => $_->get_total_size_delta()).') '.
  				$_->get_log_target(),
  				@{$self->{'_database_compactor_list'}}));

  		if (not $self->{'_dry_run'}) {
  			if ($self->is_processed()) {
  				$self->{'_logger'}->write(
  					message => (
  						'Processing complete: '.
  						($attempt ? ($attempt - 1).' retries from '.
  						 $self->{'_max_retry_count'} : ' no attempts to '.
  						 'process have been done') .', size reduced by '.
  						PgToolkit::Utils->get_size_pretty(
  							size => $self->get_size_delta()).' ('.
  						PgToolkit::Utils->get_size_pretty(
  							size => $self->get_total_size_delta()).' '.
  						'including toasts and indexes) in total, '.
  						$databases_size_delta_report.'.'),
  					level => 'notice');
  			} else {
  				# Some databases are still incomplete after all retries
  				$self->{'_logger'}->write(
  					message => (
  						'Processing incomplete: '.$self->_incomplete_count().
  						' databases left, size reduced by '.
  						PgToolkit::Utils->get_size_pretty(
  							size => $self->get_size_delta()).' ('.
  						PgToolkit::Utils->get_size_pretty(
  							size => $self->get_total_size_delta()).' '.
  						'including toasts and indexes) in total, '.
  						$databases_size_delta_report.'.'),
  					level => 'warning');
  			}
  		}
  	} else {
  		$self->{'_logger'}->write(
  			message => 'No databases to process.',
  			level => 'warning');
  	}

  	return;
  }
  
  =head1 METHODS
  
  =head2 B<is_processed()>
  
  Tests if the cluster is processed.
  
  =head3 Returns
  
  True or false value.
  
  =cut
  
  # Tests if every database compactor has been processed; true for an
  # empty list.
  sub is_processed {
  	my $self = shift;

  	# An explicit loop instead of map in void context for side effects
  	my $result = 1;
  	for my $database_compactor (@{$self->{'_database_compactor_list'}}) {
  		$result &&= $database_compactor->is_processed();
  	}

  	return $result;
  }
  
  =head2 B<get_size_delta()>
  
  Returns a size delta in bytes.
  
  =head3 Returns
  
  A number or undef if has not been processed.
  
  =cut
  
  # Returns the sum of the size deltas of all the databases, bytes.
  sub get_size_delta {
  	my $self = shift;

  	# An explicit loop instead of map in void context for side effects
  	my $result = 0;
  	for my $database_compactor (@{$self->{'_database_compactor_list'}}) {
  		$result += $database_compactor->get_size_delta();
  	}

  	return $result;
  }
  
  =head2 B<get_total_size_delta()>
  
  Returns a total (including toasts and indexes) size delta in bytes.
  
  =head3 Returns
  
  A number or undef if has not been processed.
  
  =cut
  
  # Returns the sum of the total (including toasts and indexes) size
  # deltas of all the databases, bytes.
  sub get_total_size_delta {
  	my $self = shift;

  	# An explicit loop instead of map in void context for side effects
  	my $result = 0;
  	for my $database_compactor (@{$self->{'_database_compactor_list'}}) {
  		$result += $database_compactor->get_total_size_delta();
  	}

  	return $result;
  }
  
  # Count the databases that have not been completely processed yet.
  sub _incomplete_count {
  	my $self = shift;

  	# grep in scalar context counts the matching elements; avoids the
  	# map-in-void-context side-effect anti-pattern
  	my $result = scalar(
  		grep { not $_->is_processed() }
  		@{$self->{'_database_compactor_list'}});

  	return $result;
  }
  
  # Build the list of database names to process, applying the include
  # and exclude filters and always skipping the service databases
  # (postgres, template0, template1). The result is ordered by database
  # size, smallest first.
  #
  # NOTE(review): the names are interpolated into the SQL with simple
  # single quoting; assumes the configured names are trusted input --
  # confirm.
  sub _get_dbname_list {
  	my ($self, %arg_hash) = @_;

  	# Optional "datname IN (...)" filter fragment
  	my $in = '';
  	if (@{$arg_hash{'dbname_list'}}) {
  		$in =
  			'datname IN ('.
  			join(', ', map("'$_'", @{$arg_hash{'dbname_list'}})).
  			') AND';
  	}

  	# Optional "datname NOT IN (...)" filter fragment
  	my $not_in = '';
  	if (@{$arg_hash{'excluded_dbname_list'}}) {
  		$not_in =
  			'datname NOT IN ('.
  			join(', ', map("'$_'", @{$arg_hash{'excluded_dbname_list'}})).
  			') AND';
  	}

  	my $result = $self->_execute_and_log(
  			sql => <<SQL
  SELECT datname FROM pg_catalog.pg_database
  WHERE
      $in
      $not_in
      datname NOT IN ('postgres', 'template0', 'template1')
  ORDER BY pg_catalog.pg_database_size(datname), datname
  SQL
  		);

  	# Flatten the single-column result set into a plain name list
  	return [map($_->[0], @{$result})];
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<PgToolkit::Class>
  =item L<PgToolkit::Utils>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_COMPACTOR_CLUSTER

$fatpacked{"PgToolkit/Compactor/Database.pm"} = <<'PGTOOLKIT_COMPACTOR_DATABASE';
  package PgToolkit::Compactor::Database;
  
  use base qw(PgToolkit::Compactor);
  
  use strict;
  use warnings;
  
  use PgToolkit::Utils;
  
  =head1 NAME
  
  B<PgToolkit::Compactor::Database> - a database level processing for bloat
  reducing.
  
  =head1 SYNOPSIS
  
  	my $database_compactor = PgToolkit::Compactor::Database->new(
  		database => $database,
  		logger => $logger,
  		dry_run => 0,
  		table_compactor_constructor => $table_compactor_constructor,
  		schema_name_list => ['schema1', 'schema2'],
  		excluded_schema_name_list => [],
  		table_name_list => ['table1', 'table2'],
  		excluded_table_name_list => [],
  		no_pgstatuple => 0);
  
  	$database_compactor->process();
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Compactor::Database> class is an implementation of a database
  level processing logic for bloat reducing mechanism.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<database>
  
  a database object
  
  =item C<logger>
  
  a logger object
  
  =item C<dry_run>
  
  =item C<table_compactor_constructor>
  
  a table compactor constructor code reference
  
  =item C<schema_name_list>
  
  a list of schema names to process
  
  =item C<excluded_schema_name_list>
  
  a list of schema names to exclude from processing
  
  =item C<table_name_list>
  
  a list of table names to process
  
  =item C<excluded_table_name_list>
  
  a list of table names to exclude from processing
  
  =item C<no_pgstatuple>
  
  do not use pgstattuple to calculate statistics.
  
  =back
  
  =cut
  
  # Set up the database compactor: create the _clean_pages() helper
  # function in the database (unless in dry run mode), detect whether
  # pgstattuple is available for statistics calculation, and build a
  # table compactor for every table that matches the name filters.
  sub _init {
  	my ($self, %arg_hash) = @_;

  	$self->{'_database'} = $arg_hash{'database'};

  	$self->{'_ident'} = $self->{'_database'}->quote_ident(
  		string => $self->{'_database'}->get_dbname());

  	$self->{'_log_target'} = $self->{'_ident'};

  	if (not $self->{'_dry_run'}) {
  		$self->_create_clean_pages_function();
  		$self->{'_logger'}->write(
  			message => 'Created environment.',
  			level => 'info',
  			target => $self->{'_log_target'});
  	}

  	my $pgstattuple_schema_name;
  	if (not $arg_hash{'no_pgstatuple'}) {
  		$pgstattuple_schema_name = $self->_get_pgstattuple_schema_name();
  	}

  	if ($pgstattuple_schema_name) {
  		$self->{'_logger'}->write(
  			message => 'Statistics calculation method: pgstattuple.',
  			level => 'info',
  			target => $self->{'_log_target'});
  	} else {
  		$self->{'_logger'}->write(
  			message => 'Statistics calculation method: approximation.',
  			level => 'notice',
  			target => $self->{'_log_target'});
  	}

  	my $table_data_list = $self->_get_table_data_list(
  		schema_name_list => $arg_hash{'schema_name_list'},
  		excluded_schema_name_list => $arg_hash{'excluded_schema_name_list'},
  		table_name_list => $arg_hash{'table_name_list'},
  		excluded_table_name_list => $arg_hash{'excluded_table_name_list'});

  	# One table compactor per matched table, sharing this connection
  	$self->{'_table_compactor_list'} = [];
  	for my $table_data (@{$table_data_list}) {
  		my $table_compactor = $arg_hash{'table_compactor_constructor'}->(
  			database => $self->{'_database'},
  			schema_name => $table_data->{'schema_name'},
  			table_name => $table_data->{'table_name'},
  			pgstattuple_schema_name => $pgstattuple_schema_name);
  		push(@{$self->{'_table_compactor_list'}}, $table_compactor);
  	}

  	return;
  }
  
  # Run every not yet processed table compactor, then (unless in dry
  # run mode) log a summary of the space reclaimed in this database.
  #
  # Arguments: attempt - the number of the processing attempt, passed
  # through to the table compactors.
  sub _process {
  	my ($self, %arg_hash) = @_;

  	for my $table_compactor (@{$self->{'_table_compactor_list'}}) {
  		if (not $table_compactor->is_processed()) {
  			$table_compactor->process(attempt => $arg_hash{'attempt'});
  		}
  	}

  	if (not $self->{'_dry_run'}) {
  		if ($self->is_processed()) {
  			$self->{'_logger'}->write(
  				message => (
  					'Processing complete: size reduced by '.
  					PgToolkit::Utils->get_size_pretty(
  						size => $self->get_size_delta()).' ('.
  					PgToolkit::Utils->get_size_pretty(
  						size => $self->get_total_size_delta()).' including '.
  					'toasts and indexes) in total.'),
  				level => 'info',
  				target => $self->{'_log_target'});
  		} else {
  			# Some tables are still incomplete after this attempt
  			$self->{'_logger'}->write(
  				message => (
  					'Processing incomplete: '.$self->_incomplete_count().
  					' tables left, size reduced by '.
  					PgToolkit::Utils->get_size_pretty(
  						size => $self->get_size_delta()).' ('.
  					PgToolkit::Utils->get_size_pretty(
  						size => $self->get_total_size_delta()).
  					' including toasts and indexes) in total.'),
  				level => 'warning',
  				target => $self->{'_log_target'});
  		}
  	}

  	return;
  }
  
  =head1 METHODS
  
  =head2 B<is_processed()>
  
  Tests if the database is processed.
  
  =head3 Returns
  
  True or false value.
  
  =cut
  
  # Tests if every table compactor has been processed; true for an
  # empty list.
  sub is_processed {
  	my $self = shift;

  	# An explicit loop instead of map in void context for side effects
  	my $result = 1;
  	for my $table_compactor (@{$self->{'_table_compactor_list'}}) {
  		$result &&= $table_compactor->is_processed();
  	}

  	return $result;
  }
  
  =head2 B<get_size_delta()>
  
  Returns a size delta in bytes.
  
  =head3 Returns
  
  A number or undef if has not been processed.
  
  =cut
  
  # Returns the sum of the size deltas of all the tables, bytes.
  sub get_size_delta {
  	my $self = shift;

  	# An explicit loop instead of map in void context for side effects
  	my $result = 0;
  	for my $table_compactor (@{$self->{'_table_compactor_list'}}) {
  		$result += $table_compactor->get_size_delta();
  	}

  	return $result;
  }
  
  =head2 B<get_total_size_delta()>
  
  Returns a total (including toasts and indexes) size delta in bytes.
  
  =head3 Returns
  
  A number or undef if has not been processed.
  
  =cut
  
  # Returns the sum of the total (including toasts and indexes) size
  # deltas of all the tables, bytes.
  sub get_total_size_delta {
  	my $self = shift;

  	# An explicit loop instead of map in void context for side effects
  	my $result = 0;
  	for my $table_compactor (@{$self->{'_table_compactor_list'}}) {
  		$result += $table_compactor->get_total_size_delta();
  	}

  	return $result;
  }
  
  =head2 B<get_log_target()>
  
  Returns a database name for logging.
  
  =head3 Returns
  
  A string.
  
  =cut
  
  # Accessor for the quoted database name used as the logging target.
  sub get_log_target {
  	my ($self) = @_;

  	return $self->{'_log_target'};
  }
  
  # Drop the helper function created in _init() when the compactor
  # object goes away.
  #
  # NOTE(review): this runs SQL from a destructor; if the object is
  # destroyed during global destruction or after the connection has
  # failed, the execute call may die inside DESTROY -- confirm this is
  # acceptable.
  sub DESTROY {
  	my $self = shift;

  	if (not $self->{'_dry_run'}) {
  		$self->_drop_clean_pages_function();
  		$self->{'_logger'}->write(
  			message => 'Dropped environment.',
  			level => 'info',
  			target => $self->{'_log_target'});
  	}
  }
  
  # Count the tables that have not been completely processed yet.
  sub _incomplete_count {
  	my $self = shift;

  	# grep in scalar context counts the matching elements; avoids the
  	# map-in-void-context side-effect anti-pattern
  	my $result = scalar(
  		grep { not $_->is_processed() }
  		@{$self->{'_table_compactor_list'}});

  	return $result;
  }
  
  # Look up the schema that contains the pgstattuple function.
  #
  # Returns the schema name, or undef if pgstattuple is not installed.
  sub _get_pgstattuple_schema_name {
  	my $self = shift;

  	my $result = $self->_execute_and_log(
  			sql => <<SQL
  SELECT nspname FROM pg_catalog.pg_proc
  JOIN pg_catalog.pg_namespace AS n ON pronamespace = n.oid
  WHERE proname = 'pgstattuple' LIMIT 1
  SQL
  		);

  	# Empty result set means the extension function is absent
  	return @{$result} ? $result->[0]->[0] : undef;
  }
  
  # Build the list of tables to process from pg_tables, applying the
  # schema/table include and exclude filters and always skipping the
  # system schemas. The result is ordered by relation size, smallest
  # first.
  #
  # Returns a reference to a list of {schema_name, table_name} hashes.
  #
  # NOTE(review): the names are interpolated into the SQL with simple
  # single quoting; assumes the configured names are trusted input --
  # confirm.
  sub _get_table_data_list {
  	my ($self, %arg_hash) = @_;

  	# Optional "tablename IN (...)" filter fragment
  	my $table_in = '';
  	if (@{$arg_hash{'table_name_list'}}) {
  		$table_in =
  			'tablename IN ('.
  			join(', ', map("'$_'", @{$arg_hash{'table_name_list'}})).
  			') AND';
  	}

  	# Optional "tablename NOT IN (...)" filter fragment
  	my $table_not_in = '';
  	if (@{$arg_hash{'excluded_table_name_list'}}) {
  		$table_not_in =
  			'tablename NOT IN ('.
  			join(', ', map("'$_'", @{$arg_hash{'excluded_table_name_list'}})).
  			') AND';
  	}

  	# Optional "schemaname IN (...)" filter fragment
  	my $schema_in = '';
  	if (@{$arg_hash{'schema_name_list'}}) {
  		$schema_in =
  			'schemaname IN ('.
  			join(', ', map("'$_'", @{$arg_hash{'schema_name_list'}})).
  			') AND';
  	}

  	# Optional "schemaname NOT IN (...)" filter fragment
  	my $schema_not_in = '';
  	if (@{$arg_hash{'excluded_schema_name_list'}}) {
  		$schema_not_in =
  			'schemaname NOT IN ('.
  			join(', ', map("'$_'", @{$arg_hash{'excluded_schema_name_list'}})).
  			') AND';
  	}

  	my $result = $self->_execute_and_log(
  			sql => <<SQL
  SELECT schemaname, tablename FROM pg_catalog.pg_tables
  WHERE
      $schema_in
      $schema_not_in
      $table_in
      $table_not_in
      schemaname NOT IN ('pg_catalog', 'information_schema') AND
      schemaname !~ 'pg_.*'
  ORDER BY
      pg_catalog.pg_relation_size(
          quote_ident(schemaname) || '.' || quote_ident(tablename)),
      schemaname, tablename
  SQL
  		);

  	return [map({'schema_name' => $_->[0], 'table_name' => $_->[1]},
  				@{$result})];
  }
  
  # Create the public._clean_pages() pl/pgsql helper in the database.
  # The function repeatedly issues no-op UPDATEs on all tuples in the
  # page range [i_to_page - i_page_offset + 1, i_to_page] so their new
  # versions land outside that range, freeing the tail pages; it
  # requires session_replication_role = replica so triggers do not
  # fire, and returns the new last-page number on success.
  sub _create_clean_pages_function {
  	my $self = shift;

  	$self->_execute_and_log(
  		sql => << 'SQL'
  CREATE OR REPLACE FUNCTION public._clean_pages(
      i_table_ident text,
      i_column_ident text,
      i_to_page integer,
      i_page_offset integer,
      i_max_tupples_per_page integer)
  RETURNS integer
  LANGUAGE plpgsql AS $$
  DECLARE
      _from_page integer := i_to_page - i_page_offset + 1;
      _min_ctid tid;
      _max_ctid tid;
      _ctid_list tid[];
      _next_ctid_list tid[];
      _ctid tid;
      _loop integer;
      _result_page integer;
      _update_query text :=
          'UPDATE ONLY ' || i_table_ident ||
          ' SET ' || i_column_ident || ' = ' || i_column_ident ||
          ' WHERE ctid = ANY($1) RETURNING ctid';
  BEGIN
      -- Check page argument values
      IF NOT (
          i_page_offset IS NOT NULL AND i_page_offset >= 1 AND
          i_to_page IS NOT NULL AND i_to_page >= 1 AND
          i_to_page >= i_page_offset)
      THEN
          RAISE EXCEPTION 'Wrong page arguments specified.';
      END IF;

      -- Check that session_replication_role is set to replica to
      -- prevent triggers firing
      IF NOT (
          SELECT setting = 'replica'
          FROM pg_catalog.pg_settings
          WHERE name = 'session_replication_role')
      THEN
          RAISE EXCEPTION 'The session_replication_role must be set to replica.';
      END IF;

      -- Define minimal and maximal ctid values of the range
      _min_ctid := (_from_page, 1)::text::tid;
      _max_ctid := (i_to_page, i_max_tupples_per_page)::text::tid;

      -- Build a list of possible ctid values of the range
      SELECT array_agg((pi, ti)::text::tid)
      INTO _ctid_list
      FROM generate_series(_from_page, i_to_page) AS pi
      CROSS JOIN generate_series(1, i_max_tupples_per_page) AS ti;

      <<_outer_loop>>
      FOR _loop IN 1..i_max_tupples_per_page LOOP
          _next_ctid_list := array[]::tid[];

          -- Update all the tuples in the range
          FOR _ctid IN EXECUTE _update_query USING _ctid_list
          LOOP
              IF _ctid > _max_ctid THEN
                  RAISE EXCEPTION 'No more free space left in the table.';
              ELSIF _ctid >= _min_ctid THEN
                  -- The tuple is still in the range, more updates are needed
                  _next_ctid_list := _next_ctid_list || _ctid;
              END IF;
          END LOOP;

          _ctid_list := _next_ctid_list;

          -- Finish processing if there are no tupples in the range left
          IF coalesce(array_length(_ctid_list, 1), 0) = 0 THEN
              _result_page := _from_page - 1;
              EXIT _outer_loop;
          END IF;
      END LOOP;

      -- No result
      IF _loop = i_max_tupples_per_page AND _result_page IS NULL THEN
          RAISE EXCEPTION
              'Maximal loops count has been reached with no result.';
      END IF;

      RETURN _result_page;
  END $$;
  SQL
  		);

  	return;
  }
  
  # Drop the public._clean_pages() helper created by
  # _create_clean_pages_function().
  sub _drop_clean_pages_function {
  	my $self = shift;

  	$self->_execute_and_log(
  		sql => <<SQL
  DROP FUNCTION public._clean_pages(text, text, integer, integer, integer);
  SQL
  		);

  	return;
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<PgToolkit::Class>
  =item L<PgToolkit::Utils>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_COMPACTOR_DATABASE

$fatpacked{"PgToolkit/Compactor/Table.pm"} = <<'PGTOOLKIT_COMPACTOR_TABLE';
  package PgToolkit::Compactor::Table;
  
  use base qw(PgToolkit::Compactor);
  
  use strict;
  use warnings;
  
  use POSIX;
  use Time::HiRes qw(time sleep);
  
  use PgToolkit::Utils;
  
  =head1 NAME
  
  B<PgToolkit::Compactor::Table> - a table level processing for bloat reducing.
  
  =head1 SYNOPSIS
  
  	my $table_compactor = PgToolkit::Compactor::Table->new(
  		database => $database,
  		logger => $logger,
  		dry_run => 0,
  		schema_name => $schema_name,
  		table_name => $table_name,
  		min_page_count => 100,
  		min_free_percent => 10,
  		max_pages_per_round => 5,
  		no_initial_vacuum => 0,
  		no_routine_vacuum => 0,
  		no_final_analyze => 0,
  		delay_constant => 1,
  		delay_ratio => 2,
  		force => 0,
  		reindex => 0,
  		print_reindex_queries => 0,
  		progress_report_period => 60,
  		pgstattuple_schema_name => 'public',
  		pages_per_round_divisor = 1000,
  		pages_before_vacuum_lower_divisor = 16,
  		pages_before_vacuum_lower_threshold = 1000,
  		pages_before_vacuum_upper_divisor = 50,
  		max_retry_count => 10);
  
  	$table_compactor->process();
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Compactor::Table> class is an implementation of a table level
  processing logic for bloat reducing mechanism.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<database>
  
  a database object
  
  =item C<logger>
  
  a logger object
  
  =item C<dry_run>
  
  =item C<schema_name>
  
  a schema name to process
  
  =item C<table_name>
  
  a table name to process
  
  =item C<min_page_count>
  
  a minimum number of pages that is worth to compact with for both
  tables and indexes
  
  =item C<min_free_percent>
  
  a minimum free space percent that is worth to compact with for both
  tables and indexes
  
  =item C<max_pages_per_round>
  
  an upper threshold of pages to process per one round
  
  =item C<no_initial_vacuum>
  
  perform no initial vacuum
  
  =item C<no_routine_vacuum>
  
  perform no routine vacuum
  
  =item C<no_final_analyze>
  
  perform no final analyze
  
  =item C<delay_constant>
  
  the constant part of the delay between rounds in seconds
  
  =item C<delay_ratio>
  
  the dynamic part of the delay between rounds
  
  =item C<force>
  
  process the table even if it does not meet the minimum pages and free
  space
  
  =item C<reindex>
  
  reindex the table after compacting
  
  =item C<print_reindex_queries>
  
  logs reindex queries after processing
  
  =item C<progress_report_period>
  
  a period in seconds to report the progress with
  
  =item C<pgstattuple_schema_name>
  
  schema where pgstattuple is if we should use it to get statistics
  
  =item C<pages_per_round_divisor>
  
  is used to calculate a pages per round value, recommended to set to 1000
  
   min(
       max(1/pages_per_round_divisor of the real page count, 1),
       max_pages_per_round)
  
  =item C<pages_before_vacuum_lower_divisor>
  
  =item C<pages_before_vacuum_lower_threshold>
  
  =item C<pages_before_vacuum_upper_divisor>
  
  are used to calculate a pages before vacuum value, recommended to set to
  16, 1000 and 50 respectively
  
   max(
       min(
           1/pages_before_vacuum_lower_divisor of the real page count,
           1000),
       1/pages_before_vacuum_upper_divisor of the expected page count,
       1)
  
  =item C<max_retry_count>
  
  a maximum number of attempts to compact the table.
  
  =back
  
  =cut
  
  # Store the table compactor settings and build the identifiers used
  # in SQL statements and log messages.
  sub _init {
  	my ($self, %arg_hash) = @_;

  	$self->{'_database'} = $arg_hash{'database'};
  	# NOTE(review): _logger is already assigned by the base class
  	# init(); this assignment looks redundant -- confirm.
  	$self->{'_logger'} = $arg_hash{'logger'};
  	$self->{'_schema_name'} = $arg_hash{'schema_name'};
  	$self->{'_table_name'} = $arg_hash{'table_name'};

  	# Fully qualified, properly quoted "schema"."table" identifier
  	$self->{'_ident'} =
  		$self->{'_database'}->quote_ident(
  			string => $self->{'_schema_name'}).'.'.
  		$self->{'_database'}->quote_ident(
  			string => $self->{'_table_name'});

  	# Log target in the form: "dbname", "schema"."table"
  	$self->{'_log_target'} = $self->{'_database'}->quote_ident(
  		string => $self->{'_database'}->get_dbname()).', '.$self->{'_ident'};

  	$self->{'_min_page_count'} = $arg_hash{'min_page_count'};
  	$self->{'_min_free_percent'} = $arg_hash{'min_free_percent'};
  	$self->{'_max_pages_per_round'} = $arg_hash{'max_pages_per_round'};
  	$self->{'_no_initial_vacuum'} = $arg_hash{'no_initial_vacuum'};
  	$self->{'_no_routine_vacuum'} = $arg_hash{'no_routine_vacuum'};
  	$self->{'_no_final_analyze'} = $arg_hash{'no_final_analyze'};
  	$self->{'_delay_constant'} = $arg_hash{'delay_constant'};
  	$self->{'_delay_ratio'} = $arg_hash{'delay_ratio'};
  	$self->{'_force'} = $arg_hash{'force'};
  	$self->{'_reindex'} = $arg_hash{'reindex'};
  	$self->{'_print_reindex_queries'} = $arg_hash{'print_reindex_queries'};
  	$self->{'_max_retry_count'} = $arg_hash{'max_retry_count'};

  	$self->{'_progress_report_period'} = $arg_hash{'progress_report_period'};
  	# A quoted pgstattuple schema identifier is kept only when the
  	# schema name was supplied
  	if ($arg_hash{'pgstattuple_schema_name'}) {
  		$self->{'_pgstattuple_schema_ident'} =
  			$self->{'_database'}->quote_ident(
  				string => $arg_hash{'pgstattuple_schema_name'});
  	}
  	$self->{'_pages_per_round_divisor'} = $arg_hash{'pages_per_round_divisor'};
  	$self->{'_pages_before_vacuum_lower_divisor'} =
  		$arg_hash{'pages_before_vacuum_lower_divisor'};
  	$self->{'_pages_before_vacuum_lower_threshold'} =
  		$arg_hash{'pages_before_vacuum_lower_threshold'};
  	$self->{'_pages_before_vacuum_upper_divisor'} =
  		$arg_hash{'pages_before_vacuum_upper_divisor'};

  	# The table has not been processed yet
  	$self->{'_is_processed'} = 0;

  	return;
  }
  
  # Run the table processing, handling the case when the table has been
  # dropped concurrently: a "relation ... does not exist" error marks
  # the table as processed instead of failing. Any other error goes
  # through the base class _wrap() handling.
  #
  # Arguments: attempt - an attempt number of processing.
  sub process {
  	my ($self, %arg_hash) = @_;

  	eval {
  		$self->_process(%arg_hash);
  	};
  	if ($@) {
  		# Copy $@ immediately: it is a global that any later eval
  		# could clobber.
  		my $error = $@;
  		my $name = $self->{'_schema_name'}.'.'.$self->{'_table_name'};
  		# \Q...\E quotes regex metacharacters in the name, so the dot
  		# between schema and table (or any special character in the
  		# names) cannot cause a false match.
  		if ($error =~ /relation "\Q$name\E" does not exist/) {
  			$self->_log_relation_does_not_exist();
  			$self->{'_is_processed'} = 1;
  		} else {
  			$self->_wrap(code => sub { die($error); });
  		}
  	}

  	return;
  }
  
  sub _process {
  	my ($self, %arg_hash) = @_;
  
  	my $duration;
  	my $is_skipped;
  
  	$self->{'_size_statistics'} = $self->_get_size_statistics();
  
  	if (not defined $self->{'_base_size_statistics'}) {
  		$self->{'_base_size_statistics'} = {%{$self->{'_size_statistics'}}};
  	}
  
  	if (not $self->{'_dry_run'} and not $self->{'_no_initial_vacuum'}) {
  		$self->_do_vacuum();
  		$duration = $self->{'_database'}->get_duration();
  
  		$self->{'_size_statistics'} = $self->_get_size_statistics();
  
  		$self->_log_vacuum_complete(
  			page_count => $self->{'_size_statistics'}->{'page_count'},
  			duration => $duration,
  			to_page => $self->{'_size_statistics'}->{'page_count'} - 1,
  			pages_before_vacuum => (
  				$self->{'_size_statistics'}->{'page_count'}),
  			phrase => 'initial');
  	}
  
  	if ($self->{'_size_statistics'}->{'page_count'} <= 1) {
  		$self->_log_skipping_empty_table();
  		$is_skipped = 1;;
  	}
  
  	if (not $is_skipped) {
  		$self->{'_bloat_statistics'} = $self->_get_bloat_statistics();
  		if ($self->{'_pgstattuple_schema_ident'}) {
  			$self->_log_pgstattuple_duration(
  				duration => $self->{'_database'}->get_duration());
  		}
  
  		if (not defined
  			$self->{'_bloat_statistics'}->{'effective_page_count'})
  		{
  			$self->_do_analyze();
  			$self->_log_analyze_complete(
  				duration => $self->{'_database'}->get_duration(),
  				phrase => 'required initial');
  
  			$self->{'_bloat_statistics'} = $self->_get_bloat_statistics();
  			if ($self->{'_pgstattuple_schema_ident'}) {
  				$self->_log_pgstattuple_duration(
  					duration => $self->{'_database'}->get_duration());
  			}
  
  			if (not defined
  				$self->{'_bloat_statistics'}->{'effective_page_count'})
  			{
  				$self->_log_skipping_can_not_get_bloat_statistics();
  				$is_skipped = 1;;
  			}
  		}
  	}
  
  	if (not $is_skipped) {
  		$self->_log_statistics(
  			size_statistics => $self->{'_size_statistics'},
  			bloat_statistics => $self->{'_bloat_statistics'});
  
  		if ($self->_has_special_triggers()) {
  			$self->_log_can_not_process_ar_triggers();
  			$is_skipped = 1;;
  		}
  
  		if (not $self->{'_force'}) {
  			if ($self->{'_size_statistics'}->{'page_count'} <
  				$self->{'_min_page_count'})
  			{
  				$self->_log_skipping_min_page_count(
  					page_count => $self->{'_size_statistics'}->{'page_count'});
  				$is_skipped = 1;;
  			}
  
  			if ($self->{'_bloat_statistics'}->{'free_percent'} <
  				$self->{'_min_free_percent'})
  			{
  				$self->_log_skipping_min_free_percent(
  					free_percent => (
  						$self->{'_bloat_statistics'}->{'free_percent'}));
  				$is_skipped = 1;;
  			}
  		}
  	}
  
  	my $is_compacted;
  	if (not $is_skipped and not $self->{'_dry_run'}) {
  		if ($self->{'_force'}) {
  			$self->_log_processing_forced();
  		}
  
  		my $vacuum_page_count = 0;
  		my $initial_size_statistics = {%{$self->{'_size_statistics'}}};
  		my $to_page = $self->{'_size_statistics'}->{'page_count'} - 1;
  		my $progress_report_time = $self->_time();
  		my $clean_pages_total_duration = 0;
  		my $last_loop = $self->{'_size_statistics'}->{'page_count'} + 1;
  		my $expected_error_occurred = 0;
  
  		my $expected_page_count = $self->{'_size_statistics'}->{'page_count'};
  		my $column_ident = $self->{'_database'}->quote_ident(
  			string => $self->_get_update_column());
  		my $pages_per_round = $self->_get_pages_per_round(
  			page_count => $self->{'_size_statistics'}->{'page_count'},
  			to_page => $to_page);
  		my $pages_before_vacuum = $self->_get_pages_before_vacuum(
  			expected_page_count => $expected_page_count,
  			page_count => $self->{'_size_statistics'}->{'page_count'});
  		my $max_tupples_per_page = $self->_get_max_tupples_per_page();
  		$self->_log_column(name => $column_ident);
  		$self->_log_pages_per_round(value => $pages_per_round);
  		$self->_log_pages_before_vacuum(value => $pages_before_vacuum);
  
  		my $loop;
  		for ($loop = $self->{'_size_statistics'}->{'page_count'};
  			 $loop > 0 ; $loop--)
  		{
  			my $start_time = $self->_time();
  
  			my $last_to_page = $to_page;
  			eval {
  				$to_page = $self->_clean_pages(
  					column_ident => $column_ident,
  					to_page => $last_to_page,
  					pages_per_round => $pages_per_round,
  					max_tupples_per_page => $max_tupples_per_page);
  				$clean_pages_total_duration =
  					$clean_pages_total_duration +
  					$self->{'_database'}->get_duration();
  			};
  			if ($@) {
  				if ($@ =~ 'No more free space left in the table') {
  					# Normal cleaning completion
  				} elsif ($@ =~ 'deadlock detected') {
  					$self->_log_deadlock_detected();
  					next;
  				} elsif ($@ =~ 'cannot extract system attribute') {
  					$self->_log_cannot_extract_system_attribute();
  					$expected_error_occurred = 1;
  				} else {
  					die($@);
  				}
  				last;
  			}
  
  			$self->_sleep(
  				$self->{'_delay_constant'} + $self->{'_delay_ratio'} *
  				($self->_time() - $start_time));
  
  			if ($self->_time() - $progress_report_time >=
  				$self->{'_progress_report_period'} and
  				$last_to_page != $to_page)
  			{
  				$self->_log_progress(
  					page_count => $initial_size_statistics->{'page_count'},
  					effective_page_count => (
  						$self->{'_bloat_statistics'}->{'effective_page_count'}),
  					to_page => $to_page);
  				$progress_report_time = $self->_time();
  			}
  
  			$expected_page_count -= $pages_per_round;
  			$vacuum_page_count += ($last_to_page - $to_page);
  
  			if (not $self->{'_no_routine_vacuum'} and
  				$vacuum_page_count >= $pages_before_vacuum)
  			{
  				$self->_log_clean_pages_average(
  					pages_per_round => $pages_per_round,
  					average_duration => (
  						$clean_pages_total_duration / ($last_loop - $loop)));
  				$clean_pages_total_duration = 0;
  				$last_loop = $loop;
  
  				$self->_do_vacuum();
  				$duration = $self->{'_database'}->get_duration();
  
  				$self->{'_size_statistics'} = $self->_get_size_statistics();
  
  				$self->_log_vacuum_complete(
  					page_count => $self->{'_size_statistics'}->{'page_count'},
  					duration => $duration,
  					to_page => $to_page,
  					pages_before_vacuum => $pages_before_vacuum,
  					phrase => 'routine');
  
  				$vacuum_page_count = 0;
  
  				my $last_pages_before_vacuum = $pages_before_vacuum;
  				$pages_before_vacuum = $self->_get_pages_before_vacuum(
  					expected_page_count => $expected_page_count,
  					page_count => $self->{'_size_statistics'}->{'page_count'});
  				if ($last_pages_before_vacuum != $pages_before_vacuum) {
  					$self->_log_pages_before_vacuum(
  						value => $pages_before_vacuum);
  				}
  			}
  
  			if ($to_page >= $self->{'_size_statistics'}->{'page_count'}) {
  				$to_page = $self->{'_size_statistics'}->{'page_count'} - 1;
  			}
  
  			if ($to_page <= 1) {
  				$to_page = 0;
  				last;
  			}
  
  			my $last_pages_per_round = $pages_per_round;
  			$pages_per_round = $self->_get_pages_per_round(
  				page_count => $self->{'_size_statistics'}->{'page_count'},
  				to_page => $to_page);
  			if ($last_pages_per_round != $pages_per_round) {
  				$self->_log_pages_per_round(
  					value => $pages_per_round);
  			}
  		}
  
  		if ($loop == 0) {
  			$self->_log_max_loops();
  		}
  
  		if ($to_page > 0) {
  			$self->_do_vacuum();
  			$duration = $self->{'_database'}->get_duration();
  
  			$self->{'_size_statistics'} = $self->_get_size_statistics();
  
  			$self->_log_vacuum_complete(
  				page_count => $self->{'_size_statistics'}->{'page_count'},
  				duration => $duration,
  				to_page => $to_page + $pages_per_round,
  				pages_before_vacuum => $pages_before_vacuum,
  				phrase => 'final');
  		}
  
  		if (not $self->{'_no_final_analyze'}) {
  			$self->_do_analyze();
  			$self->_log_analyze_complete(
  				duration => $self->{'_database'}->get_duration(),
  				phrase => 'final');
  		}
  
  		$self->{'_bloat_statistics'} = $self->_get_bloat_statistics();
  		if ($self->{'_pgstattuple_schema_ident'}) {
  			$self->_log_pgstattuple_duration(
  				duration => $self->{'_database'}->get_duration());
  		}
  
  		$pages_before_vacuum = $self->_get_pages_before_vacuum(
  			expected_page_count => $expected_page_count,
  			page_count => $self->{'_size_statistics'}->{'page_count'});
  
  		$is_compacted = (
  			($self->{'_size_statistics'}->{'page_count'} <=
  			 $to_page + 1 + $pages_before_vacuum) and
  			not $expected_error_occurred);
  	}
  
  	my $will_be_skipped = (
  		not $self->{'_force'} and (
  			$self->{'_size_statistics'}->{'page_count'} <
  			$self->{'_min_page_count'} or
  			$self->{'_bloat_statistics'}->{'free_percent'} <
  			$self->{'_min_free_percent'}));
  
  	my $is_reindexed;
  	if (($self->{'_dry_run'} or $is_compacted or
  		 $arg_hash{'attempt'} == $self->{'_max_retry_count'} or
  		 $is_skipped and $self->{'_pgstattuple_schema_ident'} or
  		 not $is_skipped and $will_be_skipped) and
  		($self->{'_reindex'} or $self->{'_print_reindex_queries'}))
  	{
  		for my $index_data (@{$self->_get_index_data_list()}) {
  			my $index_ident =
  				$self->{'_database'}->quote_ident(
  					string => $self->{'_schema_name'}).'.'.
  					$self->{'_database'}->quote_ident(
  						string => $index_data->{'name'});
  
  			my $initial_index_size_statistics =
  				$self->_get_index_size_statistics(ident => $index_ident);
  
  			if ($initial_index_size_statistics->{'page_count'} <= 1) {
  				$self->_log_skipping_reindex_empty(
  					ident => $index_ident);
  				next;
  			}
  
  			my $index_bloat_statistics;
  			if (not $self->{'_force'}) {
  				if ($index_data->{'method'} ne 'btree') {
  					$self->_log_skipping_reindex_not_btree(
  						index_data => $index_data,
  						ident => $index_ident);
  					$self->_log_reindex_queries(
  						ident => $index_ident,
  						initial_size_statistics => (
  							$initial_index_size_statistics),
  						bloat_statistics => undef,
  						data => $index_data);
  					next;
  				}
  
  				if ($initial_index_size_statistics->{'page_count'} <
  					$self->{'_min_page_count'})
  				{
  					$self->_log_skipping_reindex_min_page_count(
  						ident => $index_ident,
  						size_statistics => $initial_index_size_statistics);
  					next;
  				}
  
  				if ($self->{'_pgstattuple_schema_ident'})
  				{
  					$index_bloat_statistics =
  						$self->_get_index_bloat_statistics(
  							ident => $index_ident);
  
  					if ($index_bloat_statistics->{'free_percent'} <
  						$self->{'_min_free_percent'})
  					{
  						$self->_log_skipping_reindex_min_free_percent(
  							ident => $index_ident,
  							bloat_statistics => $index_bloat_statistics);
  						next;
  					}
  				}
  			}
  
  			if (not $index_data->{'allowed'}) {
  				$self->_log_skipping_reindex_not_allowed(
  					ident => $index_ident);
  				$self->_log_reindex_queries(
  					ident => $index_ident,
  					initial_size_statistics => $initial_index_size_statistics,
  					bloat_statistics => $index_bloat_statistics,
  					data => $index_data);
  				next;
  			}
  
  			if (not $self->{'_dry_run'} and $self->{'_reindex'}) {
  				$self->_reindex(data => $index_data);
  				$duration = $self->{'_database'}->get_duration();
  				$self->_alter_index(data => $index_data);
  				$duration += $self->{'_database'}->get_duration();
  				$self->_log_reindex(
  					ident => $index_ident,
  					initial_size_statistics => $initial_index_size_statistics,
  					size_statistics => $self->_get_index_size_statistics(
  						ident => $index_ident),
  					duration => $duration);
  
  				$is_reindexed = 1;
  			}
  
  			if ($self->{'_dry_run'} or $self->{'_print_reindex_queries'}) {
  				$self->_log_reindex_queries(
  					ident => $index_ident,
  					initial_size_statistics => $initial_index_size_statistics,
  					bloat_statistics => $index_bloat_statistics,
  					data => $index_data);
  			}
  		}
  
  		if (not $self->{'_dry_run'} and $self->{'_reindex'}) {
  			$self->{'_size_statistics'} = $self->_get_size_statistics();
  		}
  	}
  
  	if (not $self->{'_dry_run'}) {
  		if ($is_compacted or
  			$is_skipped and $is_reindexed or
  			not $is_skipped and $will_be_skipped)
  		{
  			$self->_log_complete_processing(
  				size_statistics => $self->{'_size_statistics'},
  				bloat_statistics => $self->{'_bloat_statistics'},
  				base_size_statistics => $self->{'_base_size_statistics'});
  		} elsif (not $is_skipped) {
  			$self->_log_incomplete_processing(
  				size_statistics => $self->{'_size_statistics'},
  				bloat_statistics => $self->{'_bloat_statistics'},
  				base_size_statistics => $self->{'_base_size_statistics'});
  		}
  	}
  
  	$self->{'_is_processed'} = (
  		$is_compacted or $is_skipped or $will_be_skipped or
  		$self->{'_dry_run'});
  
  	return;
  }
  
  =head1 METHODS
  
  =head2 B<is_processed()>
  
  Tests if the table is processed.
  
  =head3 Returns
  
  True or false value.
  
  =cut
  
  sub is_processed {
  	my ($self) = @_;
  
  	# Reflects the flag that is set at the end of the processing routine.
  	my $flag = $self->{'_is_processed'};
  
  	return $flag;
  }
  
  =head2 B<get_log_ident()>
  
  Returns a table ident to use in log messages.
  
  =head3 Returns
  
  A string representing the ident.
  
  =cut
  
  sub get_log_ident {
  	my ($self) = @_;
  
  	# The ident string is prepared beforehand and simply handed out here.
  	return $self->{'_log_ident'};
  }
  
  =head2 B<get_size_delta()>
  
  Returns a size delta in bytes.
  
  =head3 Returns
  
  A number or undef if the table has not been processed.
  
  =cut
  
  sub get_size_delta {
  	my ($self) = @_;
  
  	# Heap size difference between the state recorded before processing
  	# and the current one.
  	my $before = $self->{'_base_size_statistics'}->{'size'};
  	my $after = $self->{'_size_statistics'}->{'size'};
  
  	return $before - $after;
  }
  
  =head2 B<get_total_size_delta()>
  
  Returns a total (including toasts and indexes) size delta in bytes.
  
  =head3 Returns
  
  A number or undef if the table has not been processed.
  
  =cut
  
  sub get_total_size_delta {
  	my ($self) = @_;
  
  	# Same as get_size_delta() but for the total size, which includes
  	# toasts and indexes.
  	my $before = $self->{'_base_size_statistics'}->{'total_size'};
  	my $after = $self->{'_size_statistics'}->{'total_size'};
  
  	return $before - $after;
  }
  
  sub _log_skipping_empty_table {
  	my ($self, %arg_hash) = @_;
  
  	# Empty and one-page tables have nothing to compact.
  	# (%arg_hash is accepted for call-shape uniformity but unused.)
  	my $message = 'Skipping processing: empty or 1 page table.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_can_not_process_ar_triggers {
  	my ($self) = @_;
  
  	# "always"/"replica" triggers would fire on the cleaning updates, so
  	# the table is reported as unprocessable.
  	my $message =
  		'Can not process: "always" or "replica" triggers are on.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'warning',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_skipping_min_page_count {
  	my ($self, %arg_hash) = @_;
  
  	# The table is below the configured minimum size to be worth the
  	# effort.
  	my $message = sprintf(
  		'Skipping processing: %s pages from %s pages minimum required.',
  		$arg_hash{'page_count'}, $self->{'_min_page_count'});
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_vacuum_complete {
  	my ($self, %arg_hash) = @_;
  
  	# Reports a finished vacuum run. When some tail pages could not be
  	# cleaned the entry mentions them, and it is escalated to "notice"
  	# if the leftover exceeds twice the pages/vacuum threshold.
  	my $duration_str = sprintf("%.3f", $arg_hash{'duration'});
  	my $uncleaned_count =
  		$arg_hash{'page_count'} - ($arg_hash{'to_page'} + 1);
  
  	my ($message, $level);
  	if ($uncleaned_count > 0) {
  		$level = (
  			$uncleaned_count <= $arg_hash{'pages_before_vacuum'} * 2 ?
  			'info' : 'notice');
  		$message = (
  			'Vacuum '.$arg_hash{'phrase'}.': can not clean '.
  			$uncleaned_count.' pages, '.
  			$arg_hash{'page_count'}.' pages left, duration '.
  			$duration_str.' seconds.');
  	} else {
  		$level = 'info';
  		$message = (
  			'Vacuum '.$arg_hash{'phrase'}.': '.$arg_hash{'page_count'}.
  			' pages left, duration '.$duration_str.' seconds.');
  	}
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => $level,
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_skipping_min_free_percent {
  	my ($self, %arg_hash) = @_;
  
  	# Not enough reclaimable space to justify processing.
  	my $message = sprintf(
  		'Skipping processing: %s%% space to compact from '.
  		'%s%% minimum required.',
  		$arg_hash{'free_percent'}, $self->{'_min_free_percent'});
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_skipping_can_not_get_bloat_statistics {
  	my ($self, %arg_hash) = @_;
  
  	# Even after a forced analyze no bloat estimate was available.
  	# (%arg_hash is accepted for call-shape uniformity but unused.)
  	my $message = 'Skipping processing: can not get bloat statistics.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'warning',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_processing_forced {
  	my ($self, %arg_hash) = @_;
  
  	# Announce that the usual skip thresholds are being bypassed.
  	# (%arg_hash is accepted for call-shape uniformity but unused.)
  	my $message = 'Processing forced.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'notice',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_statistics {
  	my ($self, %arg_hash) = @_;
  
  	# Logs the current size statistics and, when compaction looks
  	# possible, the estimated bloat figures.
  
  	# The bloat details are appended only when some free space is
  	# reported and the real page count exceeds the effective one.
  	my $can_be_compacted = (
  		$arg_hash{'bloat_statistics'}->{'free_percent'} > 0 and
  		$arg_hash{'size_statistics'}->{'page_count'} >
  		$arg_hash{'bloat_statistics'}->{'effective_page_count'});
  
  	$self->{'_logger'}->write(
  		message => (
  			'Statistics: '.
  			$arg_hash{'size_statistics'}->{'page_count'}.' pages ('.
  			$arg_hash{'size_statistics'}->{'total_page_count'}.
  			' pages including toasts and indexes)'.
  			($can_be_compacted ? ', approximately '.
  			 $arg_hash{'bloat_statistics'}->{'free_percent'}.'% ('.
  			 ($arg_hash{'size_statistics'}->{'page_count'} -
  			  $arg_hash{'bloat_statistics'}->{'effective_page_count'}).
  			 ' pages) can be compacted reducing the size by '.
  			 PgToolkit::Utils->get_size_pretty(
  				 size => $arg_hash{'bloat_statistics'}->{'free_space'})
  			 : '').'.'),
  		level => 'notice',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_column {
  	my ($self, %arg_hash) = @_;
  
  	# Report the column chosen for the cleaning updates.
  	my $message = 'Update by column: '.$arg_hash{'name'}.'.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_pages_per_round {
  	my ($self, %arg_hash) = @_;
  
  	# Report the current pages-per-round setting.
  	my $message = 'Set pages/round: '.$arg_hash{'value'}.'.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_pages_before_vacuum {
  	my ($self, %arg_hash) = @_;
  
  	# Report the current pages-before-vacuum setting.
  	my $message = 'Set pages/vacuum: '.$arg_hash{'value'}.'.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_clean_pages_average {
  	my ($self, %arg_hash) = @_;
  
  	# Logs the average cleaning speed for the last vacuum interval.
  
  	# Format the per-round duration BEFORE the zero guard below so the
  	# log shows the real (possibly 0.000) value.
  	my $duration = sprintf("%.3f", $arg_hash{'average_duration'});
  
  	# Guard against division by zero in the pages/second rate.
  	if ($arg_hash{'average_duration'} == 0) {
  		$arg_hash{'average_duration'} = 0.0001;
  	}
  
  	$self->{'_logger'}->write(
  		message => (
  			'Cleaning in average: '.
  			sprintf("%.1f", $arg_hash{'pages_per_round'} /
  					$arg_hash{'average_duration'}).
  			' pages/second ('.$duration.' seconds per '.
  			$arg_hash{'pages_per_round'}.' pages).'),
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_progress {
  	my ($self, %arg_hash) = @_;
  
  	# Logs the cleaning progress. The percentage is the share of already
  	# completed pages among those expected to be cleanable (page_count
  	# minus effective_page_count); when to_page is 0 the run is finished
  	# and 100% is reported. Without an effective page count only the
  	# absolute number of completed pages is shown.
  	$self->{'_logger'}->write(
  		message => (
  			'Progress: '.
  			(defined $arg_hash{'effective_page_count'} ?
  			 int(
  				 100 *
  				 ($arg_hash{'to_page'} ?
  				  ($arg_hash{'page_count'} - $arg_hash{'to_page'} - 1) /
  				  ($arg_hash{'page_count'} -
  				   $arg_hash{'effective_page_count'}) :
  				  1)
  			 ).'%, ' : ' ').
  			($arg_hash{'page_count'} - $arg_hash{'to_page'} - 1).
  			' pages completed.'),
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_max_loops {
  	my ($self) = @_;
  
  	# The cleaning loop ran out of iterations before finishing.
  	my $message = 'Maximum loops reached.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'warning',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_analyze_complete {
  	my ($self, %arg_hash) = @_;
  
  	# Report a finished analyze run with its duration.
  	my $message = (
  		'Analyze '.$arg_hash{'phrase'}.': duration '.
  		sprintf("%.3f", $arg_hash{'duration'}).' second.');
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_skipping_reindex_not_allowed {
  	my ($self, %arg_hash) = @_;
  
  	# The index has dependencies that prevent a lock-free rebuild.
  	my $message = (
  		'Skipping reindex: '.$arg_hash{'ident'}.
  		', can not reindex without heavy locks because '.
  		'of its dependencies, reindexing is up to you.');
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'notice',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_skipping_reindex_not_btree {
  	my ($self, %arg_hash) = @_;
  
  	# Only btree indexes are handled automatically.
  	my $message = (
  		'Skipping reindex: '.$arg_hash{'ident'}.' is a '.
  		$arg_hash{'index_data'}->{'method'}.' index not a btree, '.
  		'reindexing is up to you.');
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'notice',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_skipping_reindex_empty {
  	my ($self, %arg_hash) = @_;
  
  	# Empty and one-page indexes have nothing to gain from a rebuild.
  	my $message = (
  		'Skipping reindex: '.$arg_hash{'ident'}.', empty or 1 page index.');
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_skipping_reindex_min_page_count {
  	my ($self, %arg_hash) = @_;
  
  	# The index is below the configured minimum size.
  	my $message = sprintf(
  		'Skipping reindex: %s, %s pages from %s pages minimum required.',
  		$arg_hash{'ident'},
  		$arg_hash{'size_statistics'}->{'page_count'},
  		$self->{'_min_page_count'});
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_skipping_reindex_min_free_percent {
  	my ($self, %arg_hash) = @_;
  
  	# The index does not have enough reclaimable space.
  	my $message = sprintf(
  		'Skipping reindex: %s, %s%% space to compact from '.
  		'%s%% minimum required.',
  		$arg_hash{'ident'},
  		$arg_hash{'bloat_statistics'}->{'free_percent'},
  		$self->{'_min_free_percent'});
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_reindex {
  	my ($self, %arg_hash) = @_;
  
  	# Logs the result of a completed reindex: initial size, reduction in
  	# percent and bytes, and the duration.
  	my $free_percent = 100 *(
  		1 - $arg_hash{'size_statistics'}->{'size'} /
  		$arg_hash{'initial_size_statistics'}->{'size'});
  
  	my $free_space = (
  		$arg_hash{'initial_size_statistics'}->{'size'} -
  		$arg_hash{'size_statistics'}->{'size'});
  
  	# NOTE(review): the deltas above are computed unconditionally while
  	# the message below guards on $arg_hash{'size_statistics'}; a missing
  	# size_statistics would already have died on the division -- confirm
  	# that callers always pass it.
  	$self->{'_logger'}->write(
  		message => (
  			'Reindex'.($self->{'_force'} ? ' forced' : '').': '.
  			$arg_hash{'ident'}.', '.
  			($arg_hash{'size_statistics'} ? 'initial size '.
  			 $arg_hash{'size_statistics'}->{'page_count'}.' pages ('.
  			 PgToolkit::Utils->get_size_pretty(
  				 size => $arg_hash{'size_statistics'}->{'size'}).
  			 '), has been reduced by '.
  			 int($free_percent).'% ('.
  			 PgToolkit::Utils->get_size_pretty(
  				 size => int($free_space)).'), ' : '').
  			'duration '.sprintf("%.3f", $arg_hash{'duration'}).' seconds.'),
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_reindex_queries {
  	my ($self, %arg_hash) = @_;
  
  	# Prints the queries a DBA would need to rebuild the index manually.
  	# For indexes that allow the lock-free procedure the rebuild and the
  	# swap (alter) queries are printed, otherwise a plain REINDEX one.
  	# Size/bloat figures are included only when the corresponding
  	# statistics were passed in.
  	$self->{'_logger'}->write(
  		message => (
  			'Reindex queries'.($self->{'_force'} ? ' forced' : '').': '.
  			$arg_hash{'ident'}.
  			($arg_hash{'initial_size_statistics'} ? ', initial size '.
  			 $arg_hash{'initial_size_statistics'}->{'page_count'}.' pages ('.
  			 PgToolkit::Utils->get_size_pretty(
  				 size => $arg_hash{'initial_size_statistics'}->{'size'}).')'.
  			 ($arg_hash{'bloat_statistics'} ? ', will be reduced by '.
  			  $arg_hash{'bloat_statistics'}->{'free_percent'}.'% ('.
  			  PgToolkit::Utils->get_size_pretty(
  				  size => $arg_hash{'bloat_statistics'}->{'free_space'}).
  			  ')' : '') : '').".\n".
  			($arg_hash{'data'}->{'allowed'} ?
  			 $self->_get_reindex_query(data => $arg_hash{'data'})."\n".
  			 $self->_get_alter_index_query(data => $arg_hash{'data'}) :
  			 $self->_get_straight_reindex_query(data => $arg_hash{'data'}))),
  		level => 'notice',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_incomplete_processing {
  	my ($self, %arg_hash) = @_;
  
  	# Summarize the partial results; the warning level signals that
  	# another attempt may be needed.
  	my $results = $self->_get_log_processing_results(
  		size_statistics => $arg_hash{'size_statistics'},
  		bloat_statistics => $arg_hash{'bloat_statistics'},
  		base_size_statistics => $arg_hash{'base_size_statistics'},
  		complete => 0);
  
  	$self->{'_logger'}->write(
  		message => 'Processing incomplete: '.$results,
  		level => 'warning',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_complete_processing {
  	my ($self, %arg_hash) = @_;
  
  	# Summarize the final results of a successfully processed table.
  	my $results = $self->_get_log_processing_results(
  		size_statistics => $arg_hash{'size_statistics'},
  		bloat_statistics => $arg_hash{'bloat_statistics'},
  		base_size_statistics => $arg_hash{'base_size_statistics'},
  		complete => 1);
  
  	$self->{'_logger'}->write(
  		message => 'Processing complete: '.$results,
  		level => 'notice',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _get_log_processing_results {
  	my ($self, %arg_hash) = @_;
  
  	# Builds the common tail for the "Processing complete/incomplete"
  	# messages: pages left, size reduction, and -- for incomplete runs
  	# that still look compactable -- the remaining expected gain.
  
  	# The "more expected" part is shown only for incomplete runs whose
  	# statistics still report free space and excess pages.
  	my $can_be_compacted = (
  		defined $arg_hash{'bloat_statistics'}->{'free_percent'} and
  		defined $arg_hash{'bloat_statistics'}->{'effective_page_count'} and
  		$arg_hash{'bloat_statistics'}->{'free_percent'} > 0 and
  		$arg_hash{'size_statistics'}->{'page_count'} >
  		$arg_hash{'bloat_statistics'}->{'effective_page_count'} and
  		not $arg_hash{'complete'});
  
  	return
  		'left '.$arg_hash{'size_statistics'}->{'page_count'}.' pages ('.
  		$arg_hash{'size_statistics'}->{'total_page_count'}.
  		' pages including toasts and indexes), size reduced by '.
  		PgToolkit::Utils->get_size_pretty(
  			size => ($arg_hash{'base_size_statistics'}->{'size'} -
  					 $arg_hash{'size_statistics'}->{'size'})).' ('.
  		PgToolkit::Utils->get_size_pretty(
  			size => ($arg_hash{'base_size_statistics'}->{'total_size'} -
  					 $arg_hash{'size_statistics'}->{'total_size'})).
  		' including toasts and indexes) in total'.
  		($can_be_compacted ? ', approximately '.
  		 $arg_hash{'bloat_statistics'}->{'free_percent'}.'% ('.
  		 ($arg_hash{'size_statistics'}->{'page_count'} -
  		  $arg_hash{'bloat_statistics'}->{'effective_page_count'}).
  		 ' pages) that is '.
  		 PgToolkit::Utils->get_size_pretty(
  			 size => $arg_hash{'bloat_statistics'}->{'free_space'}).
  		 ' more were expected to be compacted after this attempt' :
  		 '').'.';
  }
  
  sub _log_deadlock_detected {
  	my ($self) = @_;
  
  	# A deadlock aborted one cleaning round; the caller retries.
  	my $message = 'Detected deadlock during cleaning.';
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'notice',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_cannot_extract_system_attribute {
  	my ($self) = @_;
  
  	# This error aborts the cleaning loop for the table entirely.
  	my $message = (
  		'Stopped processing as a system attribute extraction '.
  		'error has occurred.');
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'warning',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_relation_does_not_exist {
  	my ($self) = @_;
  
  	# The relation disappeared (e.g. was dropped) while being processed.
  	my $message = (
  		'Stopped processing as a relation does not exist '.
  		'error has occurred.');
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'warning',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _log_pgstattuple_duration {
  	my ($self, %arg_hash) = @_;
  
  	# Report how long the pgstattuple-based bloat query took.
  	my $message = (
  		'Bloat statistics with pgstattuple: duration '.
  		sprintf("%.3f", $arg_hash{'duration'}).' seconds.');
  
  	$self->{'_logger'}->write(
  		message => $message,
  		level => 'info',
  		target => $self->{'_log_target'});
  
  	return;
  }
  
  sub _sleep {
  	my ($self, $seconds) = @_;
  
  	# Thin wrapper around the built-in, presumably kept as a method so
  	# it can be overridden in tests.
  	sleep($seconds);
  
  	return;
  }
  
  sub _time {
  	# Current Unix timestamp; a method-shaped wrapper over the built-in.
  	my $now = time();
  
  	return $now;
  }
  
  sub _has_special_triggers {
  	my $self = shift;
  
  	# Returns the number of triggers (used by the caller as a boolean)
  	# that stay enabled in "always" ('A') or "replica" ('R') mode;
  	# tgtype bit 16 presumably selects UPDATE triggers -- TODO confirm
  	# against the pg_trigger catalog documentation.
  	my $result = $self->_execute_and_log(
  		sql => <<SQL
  SELECT count(1) FROM pg_catalog.pg_trigger
  WHERE
      tgrelid = '$self->{'_ident'}'::regclass AND
      tgenabled IN ('A', 'R') AND
      (tgtype & 16)::boolean
  SQL
  		);
  
  	return $result->[0]->[0];
  }
  
  sub _get_max_tupples_per_page {
  	my $self = shift;
  
  	# Estimates an upper bound on tuples per page as block_size divided
  	# by the summed width of the system attributes (attnum < 0), i.e.
  	# assuming zero-width user data.
  	my $result = $self->_execute_and_log(
  		sql => <<SQL
  SELECT ceil(current_setting('block_size')::real / sum(attlen))
  FROM pg_catalog.pg_attribute
  WHERE
      attrelid = '$self->{'_ident'}'::regclass AND
      attnum < 0;
  SQL
  		);
  
  	return $result->[0]->[0];
  }
  
  sub _get_bloat_statistics {
  	my $self = shift;
  
  	# Returns a hash with effective_page_count, free_percent and
  	# free_space for the table. When pgstattuple is installed its exact
  	# figures are used; otherwise the bloat is approximated from the
  	# planner statistics in pg_statistic. Both queries take the index
  	# fillfactor reloption (default 100) into account.
  	my $result;
  	if ($self->{'_pgstattuple_schema_ident'}) {
  		$result = $self->_execute_and_log(
  			sql => <<SQL
  SELECT
      ceil((size - free_space) * 100 / fillfactor / bs) AS effective_page_count,
      round(
          (100 * (1 - (100 - free_percent) / fillfactor))::numeric, 2
      ) AS free_percent,
      ceil(size - (size - free_space) * 100 / fillfactor) AS free_space
  FROM (
      SELECT
          current_setting('block_size')::integer AS bs,
          pg_catalog.pg_relation_size(pg_catalog.pg_class.oid) AS size,
          coalesce(
              (
                  SELECT (
                      regexp_matches(
                          reloptions::text, E'.*fillfactor=(\\\\d+).*'))[1]),
              '100')::real AS fillfactor,
          pgst.*
      FROM pg_catalog.pg_class
      CROSS JOIN
          $self->{'_pgstattuple_schema_ident'}.pgstattuple(
              '$self->{'_ident'}') AS pgst
      WHERE pg_catalog.pg_class.oid = '$self->{'_ident'}'::regclass
  ) AS sq
  SQL
  			);
  	} else {
  		# Statistical approximation: estimate the pure (bloat-free) page
  		# count from reltuples, attribute widths and null fractions,
  		# with alignment (ma) and header overhead folded in.
  		$result = $self->_execute_and_log(
  			sql => <<SQL
  SELECT
      ceil(pure_page_count * 100 / fillfactor) AS effective_page_count,
      CASE WHEN size::real > 0 THEN
          round(
              100 * (
                  1 - (pure_page_count * 100 / fillfactor) / (size::real / bs)
              )::numeric, 2
          )
      ELSE 0 END AS free_percent,
      ceil(size::real - bs * pure_page_count * 100 / fillfactor) AS free_space
  FROM (
      SELECT
          bs, size, fillfactor,
          ceil(
              reltuples * (
                  max(stanullfrac) * ma * ceil(
                      (
                          ma * ceil(
                              (
                                  header_width +
                                  ma * ceil(count(1)::real / ma)
                              )::real / ma
                          ) + sum((1 - stanullfrac) * stawidth)
                      )::real / ma
                  ) +
                  (1 - max(stanullfrac)) * ma * ceil(
                      (
                          ma * ceil(header_width::real / ma) +
                          sum((1 - stanullfrac) * stawidth)
                      )::real / ma
                  )
              )::real / (bs - 24)
          ) AS pure_page_count
      FROM (
          SELECT
              pg_catalog.pg_class.oid AS class_oid,
              reltuples,
              23 AS header_width, 8 AS ma,
              current_setting('block_size')::integer AS bs,
              pg_catalog.pg_relation_size(pg_catalog.pg_class.oid) AS size,
              coalesce(
                  (
                      SELECT (
                          regexp_matches(
                              reloptions::text, E'.*fillfactor=(\\\\d+).*'))[1]),
                  '100')::real AS fillfactor
          FROM pg_catalog.pg_class
          WHERE pg_catalog.pg_class.oid = '$self->{'_ident'}'::regclass
      ) AS const
      LEFT JOIN pg_catalog.pg_statistic ON starelid = class_oid
      GROUP BY bs, class_oid, fillfactor, ma, size, reltuples, header_width
  ) AS sq
  SQL
  			);
  	}
  
  	# Undefined or negative percent/space values are clamped to 0;
  	# effective_page_count may still be undef and is checked by callers.
  	return {
  		'effective_page_count' => $result->[0]->[0],
  		'free_percent' => (defined $result->[0]->[1] and
  						   $result->[0]->[1] > 0) ? $result->[0]->[1] : 0,
  		'free_space' => (defined $result->[0]->[2] and
  						 $result->[0]->[2] > 0) ? $result->[0]->[2] : 0};
  }
  
  sub _get_size_statistics {
  	my $self = shift;
  
  	# Fetches the table's heap size and total size (toasts and indexes
  	# included) together with the derived page counts.
  	my $result = $self->_execute_and_log(
  		sql => <<SQL
  SELECT
      size,
      total_size,
      ceil(size::real / bs) AS page_count,
      ceil(total_size::real / bs) AS total_page_count
  FROM (
      SELECT
          current_setting('block_size')::integer AS bs,
          pg_catalog.pg_relation_size('$self->{'_ident'}') AS size,
          pg_catalog.pg_total_relation_size('$self->{'_ident'}') AS total_size
  ) AS sq
  SQL
  		);
  
  	return {
  		'size' => $result->[0]->[0],
  		'total_size' => $result->[0]->[1],
  		'page_count' => $result->[0]->[2],
  		'total_page_count' => $result->[0]->[3]};
  }
  
  sub _do_vacuum {
  	my ($self, %arg_hash) = @_;
  
  	# Runs VACUUM on the table, optionally piggybacking an ANALYZE.
  	my $sql = 'VACUUM ';
  	if ($arg_hash{'analyze'}) {
  		$sql .= 'ANALYZE ';
  	}
  	$sql .= $self->{'_ident'};
  
  	$self->_execute_and_log(sql => $sql);
  
  	return;
  }
  
  sub _do_analyze {
  	my ($self, %arg_hash) = @_;
  
  	# Refreshes the planner statistics for the table.
  	my $sql = 'ANALYZE '.$self->{'_ident'};
  
  	$self->_execute_and_log(sql => $sql);
  
  	return;
  }
  
  sub _get_update_column {
  	my $self = shift;
  
  	# Chooses the cheapest column for the cleaning updates: fixed-width
  	# over variable-width, not indexed over indexed, then smallest and
  	# first by attribute number (the ORDER BY expressions sort false
  	# before true).
  	my $result = $self->_execute_and_log(
  		sql => <<SQL
  SELECT attname
  FROM pg_catalog.pg_attribute
  WHERE
      attnum > 0 AND -- neither system
      NOT attisdropped AND -- nor dropped
      attrelid = '$self->{'_ident'}'::regclass
  ORDER BY
      -- Variable legth attributes have lower priority because of the chance
      -- of being toasted
      (attlen = -1),
      -- Preferably not indexed attributes
      (
          attnum::text IN (
              SELECT regexp_split_to_table(indkey::text, ' ')
              FROM pg_catalog.pg_index
              WHERE indrelid = '$self->{'_ident'}'::regclass)),
      -- Preferably smaller attributes
      attlen,
      attnum
  LIMIT 1;
  SQL
  		);
  
  	return $result->[0]->[0];
  }
  
  sub _clean_pages {
  	my ($self, %arg_hash) = @_;
  
  	# Invokes the public._clean_pages() helper function (installed
  	# separately; presumably it relocates tuples off the tail pages --
  	# see the function's definition) and returns the new to_page
  	# boundary it reports.
  	my $result = $self->_execute_and_log(
  		level => 'debug1',
  		sql => <<SQL
  SELECT public._clean_pages(
      '$self->{'_ident'}', '$arg_hash{'column_ident'}', $arg_hash{'to_page'},
      $arg_hash{'pages_per_round'}, $arg_hash{'max_tupples_per_page'})
  SQL
  		);
  
  	return $result->[0]->[0];
  }
  
  sub _get_index_data_list {
  	my $self = shift;
  
  	# Collects metadata for every index of the table: name, tablespace,
  	# definition, access method, constraint name/type, and an "allowed"
  	# flag derived from pg_depend/pg_constraint that presumably marks
  	# indexes that can be rebuilt and swapped without heavy locks --
  	# confirm against the pg_depend deptype documentation. The list is
  	# ordered by index size (ORDER BY 8 = pg_relation_size); the size
  	# column itself is fetched only for ordering and is not returned.
  	my $result = $self->_execute_and_log(
  		sql => <<SQL
  SELECT
      indexname, tablespace, indexdef,
      regexp_replace(indexdef, E'.* USING (\\\\w+) .*', E'\\\\1') AS indmethod,
      conname,
      CASE
          WHEN contype = 'p' THEN 'PRIMARY KEY'
          WHEN contype = 'u' THEN 'UNIQUE'
          ELSE NULL END AS contypedef,
      (
          SELECT
              bool_and(
                  deptype IN ('n', 'a', 'i') AND
                  NOT (refobjid = indexoid AND deptype = 'n') AND
                  NOT (
                      objid = indexoid AND deptype = 'i' AND
                      (version < array[9,1] OR contype NOT IN ('p', 'u'))))
          FROM pg_catalog.pg_depend
          LEFT JOIN pg_catalog.pg_constraint ON
              pg_catalog.pg_constraint.oid = refobjid
          WHERE objid = indexoid OR refobjid = indexoid
      )::integer AS allowed,
      pg_catalog.pg_relation_size(indexoid)
  FROM (
      SELECT
          indexname, tablespace, indexdef,
          (
              quote_ident(schemaname) || '.' ||
              quote_ident(indexname))::regclass AS indexoid,
          string_to_array(
              regexp_replace(
                  version(), E'.*PostgreSQL (\\\\d+\\\\.\\\\d+).*', E'\\\\1'),
              '.')::integer[] AS version
      FROM pg_catalog.pg_indexes
      WHERE
          schemaname = '$self->{'_schema_name'}' AND
          tablename = '$self->{'_table_name'}'
  ) AS sq
  LEFT JOIN pg_catalog.pg_constraint ON
      conindid = indexoid AND contype IN ('p', 'u')
  ORDER BY 8;
  SQL
  		);
  
  	return [
  		map(
  			{'name' => $_->[0],
  			 'tablespace' => $_->[1],
  			 'definition' => $_->[2],
  			 'method' => $_->[3],
  			 'conname' => $_->[4],
  			 'contypedef' => $_->[5],
  			 'allowed' => $_->[6]},
  			@{$result})];
  }
  
  # Fetches the on-disk size of the relation named by the 'ident'
  # argument and derives its page count from the server's block_size
  # setting (ceil() guards against partial pages).
  #
  # Returns {'size' => bytes, 'page_count' => pages}.
  sub _get_index_size_statistics {
  	my ($self, %arg_hash) = @_;
  
  	my $result = $self->_execute_and_log(
  		sql => <<SQL
  SELECT size, ceil(size / bs) AS page_count
  FROM (
      SELECT
          pg_catalog.pg_relation_size('$arg_hash{'ident'}'::regclass) AS size,
          current_setting('block_size')::real AS bs
  ) AS sq
  SQL
  		);
  
  	return {
  		'size' => $result->[0]->[0],
  		'page_count' => $result->[0]->[1]};
  }
  
  # Estimates index bloat with pgstattuple's pgstatindex(), comparing the
  # average leaf density against the index fillfactor (taken from
  # reloptions, defaulting to '90' when not set -- btree's default).
  # avg_leaf_density is 'NaN' for empty indexes, hence the CASE guards.
  #
  # Named arguments: ident -- the fully qualified index name.
  #
  # Returns {'free_percent' => %, 'free_space' => bytes}.
  sub _get_index_bloat_statistics {
  	my ($self, %arg_hash) = @_;
  
  	my $result = $self->_execute_and_log(
  		sql => <<SQL
  SELECT
      CASE
          WHEN avg_leaf_density = 'NaN' THEN 0
          ELSE
              round(
                  (100 * (1 - avg_leaf_density / fillfactor))::numeric, 2
              )
          END AS free_percent,
      CASE
          WHEN avg_leaf_density = 'NaN' THEN 0
          ELSE
              ceil(
                  index_size * (1 - avg_leaf_density / fillfactor)
              )
          END AS free_space
  FROM (
      SELECT
          coalesce(
              (
                  SELECT (
                      regexp_matches(
                          reloptions::text, E'.*fillfactor=(\\\\d+).*'))[1]),
              '90')::real AS fillfactor,
          pgsi.*
      FROM pg_catalog.pg_class
      CROSS JOIN $self->{'_pgstattuple_schema_ident'}.pgstatindex(
          '$arg_hash{'ident'}') AS pgsi
      WHERE pg_catalog.pg_class.oid = '$arg_hash{'ident'}'::regclass
  ) AS oq
  SQL
  		);
  
  	return {
  		'free_percent' => $result->[0]->[0],
  		'free_space' => $result->[0]->[1]};
  }
  
  # Builds a CREATE INDEX CONCURRENTLY statement recreating the given
  # index under the temporary name pgcompactor_tmp<pid>.
  #
  # Named arguments: data -- an index data hash with 'definition' and an
  # optional 'tablespace' key (as produced by _get_index_data_list()).
  #
  # Returns the SQL string, semicolon-terminated. The TABLESPACE clause,
  # when requested, is inserted before an optional trailing WHERE clause
  # (partial indexes), or appended otherwise.
  sub _get_reindex_query {
  	my ($self, %arg_hash) = @_;
  
  	my $sql = $arg_hash{'data'}->{'definition'};
  	$sql =~ s/INDEX (\S+)/INDEX CONCURRENTLY pgcompactor_tmp$$/;
  	if (defined $arg_hash{'data'}->{'tablespace'}) {
  		# Testing for the WHERE clause explicitly (instead of an
  		# optional capture interpolated as $1) avoids an
  		# "uninitialized value" warning when there is no WHERE clause,
  		# while producing byte-identical output.
  		my $tablespace_clause =
  			'TABLESPACE '.$arg_hash{'data'}->{'tablespace'}.' ';
  		if (not $sql =~ s/(WHERE .*)$/$tablespace_clause$1/) {
  			$sql .= $tablespace_clause;
  		}
  	}
  	$sql .= ';';
  
  	return $sql;
  }
  
  # Builds the transaction that swaps the freshly built temporary index
  # (pgcompactor_tmp<pid>) in place of the original one.
  #
  # For constraint-backed indexes (PRIMARY KEY/UNIQUE) the constraint is
  # dropped and re-added USING INDEX; plain indexes are dropped and the
  # temporary one is renamed.
  #
  # Named arguments: data -- an index data hash ('name', optional
  # 'conname' and 'contypedef').
  #
  # Returns the complete BEGIN ... END; SQL string.
  sub _get_alter_index_query {
  	my ($self, %arg_hash) = @_;
  
  	my $data = $arg_hash{'data'};
  
  	my $schema_ident = $self->{'_database'}->quote_ident(
  		string => $self->{'_schema_name'});
  	my $index_ident = $self->{'_database'}->quote_ident(
  		string => $data->{'name'});
  
  	my $body;
  	if ($data->{'conname'}) {
  		my $constraint_ident = $self->{'_database'}->quote_ident(
  			string => $data->{'conname'});
  		$body =
  			'ALTER TABLE '.$self->{'_ident'}.
  			' DROP CONSTRAINT '.$constraint_ident.'; '.
  			'ALTER TABLE '.$self->{'_ident'}.
  			' ADD CONSTRAINT '.$constraint_ident.' '.
  			$data->{'contypedef'}.
  			' USING INDEX pgcompactor_tmp'.$$.'; ';
  	} else {
  		$body =
  			'DROP INDEX '.$schema_ident.'.'.$index_ident.'; '.
  			'ALTER INDEX '.$schema_ident.'.pgcompactor_tmp'.$$.
  			' RENAME TO '.$index_ident.'; ';
  	}
  
  	return 'BEGIN; '.$body.'END;';
  }
  
  # Builds a plain (locking) REINDEX statement for the given index.
  #
  # Named arguments: data -- an index data hash with the 'name' key.
  #
  # Returns the SQL string. Note: PostgreSQL requires a target keyword
  # after REINDEX ("REINDEX INDEX name"); the previous form
  # "REINDEX name" is a syntax error on every server version.
  sub _get_straight_reindex_query {
  	my ($self, %arg_hash) = @_;
  
  	my $schema_ident = $self->{'_database'}->quote_ident(
  		string => $self->{'_schema_name'});
  	my $index_ident = $self->{'_database'}->quote_ident(
  		string => $arg_hash{'data'}->{'name'});
  
  	return 'REINDEX INDEX '.$schema_ident.'.'.$index_ident.';';
  }
  
  # Rebuilds the given index by executing the CREATE INDEX CONCURRENTLY
  # statement produced by _get_reindex_query().
  sub _reindex {
  	my ($self, %arg_hash) = @_;
  
  	my $query = $self->_get_reindex_query(data => $arg_hash{'data'});
  	$self->_execute_and_log(sql => $query);
  
  	return;
  }
  
  # Swaps the temporary index in place of the original one by executing
  # the transaction produced by _get_alter_index_query().
  sub _alter_index {
  	my ($self, %arg_hash) = @_;
  
  	my $query = $self->_get_alter_index_query(data => $arg_hash{'data'});
  	$self->_execute_and_log(sql => $query);
  
  	return;
  }
  
  # Calculates how many pages a single cleaning round should process:
  # a configured fraction of the table (page_count divided by
  # _pages_per_round_divisor), clamped to [1, _max_pages_per_round],
  # and never more than the to_page boundary itself.
  sub _get_pages_per_round {
  	my ($self, %arg_hash) = @_;
  
  	my $share =
  		$arg_hash{'page_count'} / $self->{'_pages_per_round_divisor'};
  
  	# Clamp to at least one page and at most the configured maximum.
  	my $bounded = ($share > 1) ? $share : 1;
  	if ($bounded > $self->{'_max_pages_per_round'}) {
  		$bounded = $self->{'_max_pages_per_round'};
  	}
  
  	my $result = ceil($bounded);
  
  	# Do not process more pages per round than remain to be freed.
  	if ($result > $arg_hash{'to_page'}) {
  		$result = $arg_hash{'to_page'};
  	}
  
  	return $result;
  }
  
  # Calculates how many pages may be cleaned between two VACUUM runs:
  # the larger of (a) a fraction of the current table size capped at
  # _pages_before_vacuum_lower_threshold, (b) a fraction of the expected
  # final size, and (c) one page; rounded up.
  sub _get_pages_before_vacuum {
  	my ($self, %arg_hash) = @_;
  
  	my $lower = $arg_hash{'page_count'} /
  		$self->{'_pages_before_vacuum_lower_divisor'};
  	if ($lower > $self->{'_pages_before_vacuum_lower_threshold'}) {
  		$lower = $self->{'_pages_before_vacuum_lower_threshold'};
  	}
  
  	my $upper = $arg_hash{'expected_page_count'} /
  		$self->{'_pages_before_vacuum_upper_divisor'};
  
  	# Take the maximum of the two candidates and 1.
  	my $max = $lower;
  	for my $candidate ($upper, 1) {
  		if ($candidate > $max) {
  			$max = $candidate;
  		}
  	}
  
  	return ceil($max);
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<PgToolkit::Class>
  =item L<PgToolkit::Utils>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_COMPACTOR_TABLE

$fatpacked{"PgToolkit/Database.pm"} = <<'PGTOOLKIT_DATABASE';
  package PgToolkit::Database;
  
  use base qw(PgToolkit::Class);
  
  use strict;
  use warnings;
  
  use POSIX;
  use Time::HiRes qw(time sleep);
  
  =head1 NAME
  
  B<PgToolkit::Database> - a database abstract class.
  
  =head1 SYNOPSIS
  
  	package SomeDatabase;
  
  	use base qw(PgToolkit::Database);
  
  	sub init {
  		my ($self, %arg_hash) = @_;
  
  		$self->SUPER::init(%arg_hash);
  
  		# some initialization
  
  		return;
  	}
  
  	sub _execute {
  		# some implementation
  	}
  
  	1;
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Database> is a base class for database adapters.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<dbname>
  
  =back
  
  =cut
  
  # Remembers the target database name; concrete adapters (and the
  # accessors below) read it from the '_dbname' attribute.
  sub init {
  	my ($self, %args) = @_;
  
  	$self->{'_dbname'} = $args{'dbname'};
  
  	return;
  }
  
  =head1 METHODS
  
  =head2 B<execute()>
  
  Executes an SQL.
  
  The method _execute() must be implemented in derivative classes.
  
  =head3 Arguments
  
  =over 4
  
  =item C<sql>
  
  an SQL string.
  
  =back
  
  =head3 Returns
  
  An array of arrays representing the result.
  
  =head3 Throws
  
  =over 4
  
  =item C<DatabaseError>
  
  when the database raised an error during execution of the SQL.
  
  =back
  
  =cut
  
  # Template method: delegates to the adapter-specific _execute() while
  # measuring how long the statement took, so get_duration() can report
  # the duration of the last query.
  sub execute {
  	my ($self, %arg_hash) = @_;
  
  	my $started_at = $self->_time();
  	my $result = $self->_execute(%arg_hash);
  	$self->{'_duration'} = $self->_time() - $started_at;
  
  	return $result;
  }
  
  # Abstract part of execute(); concrete database adapters must
  # override this with the real statement execution.
  sub _execute {
  	die 'NotImplementedError';
  }
  
  =head2 B<get_duration()>
  
  Returns a duration of the last query.
  
  =head3 Returns
  
  A high resolution time in seconds.
  
  =cut
  
  # Accessor for the duration (in high-resolution seconds) recorded by
  # the last execute() call.
  sub get_duration {
  	my ($self) = @_;
  
  	return $self->{'_duration'};
  }
  
  =head2 B<get_adapter_name()>
  
  Returns the name of the adapter.
  
  This method must be implemented in derivative classes.
  
  =head3 Returns
  
  A string representing the name.
  
  =cut
  
  # Abstract: concrete adapters must return their human-readable name.
  sub get_adapter_name {
  	die 'NotImplementedError';
  }
  
  =head2 B<get_dbname()>
  
  Returns the database name.
  
  =head3 Returns
  
  A string with the name.
  
  =cut
  
  # Accessor for the database name stored by init().
  sub get_dbname {
  	my ($self) = @_;
  
  	return $self->{'_dbname'};
  }
  
  =head2 B<_get_escaped_dbname()>
  
  Returns an escaped database name.
  
  =head3 Returns
  
  A database name string with all the non-word characters escaped.
  
  =cut
  
  # Returns the database name with every non-word character
  # backslash-escaped, suitable for embedding into a DSN or a shell
  # command line.
  sub _get_escaped_dbname {
  	my $self = shift;
  
  	(my $escaped = $self->{'_dbname'}) =~ s{(\W)}{\\$1}g;
  
  	return $escaped;
  }
  
  =head2 B<quote_ident()>
  
  =head3 Arguments
  
  =over 4
  
  =item C<string>
  
  =back
  
  =head3 Returns
  
  A quoted identifier string.
  
  =head3 Throws
  
  =over 4
  
  =item C<DatabaseError>
  
  when nothing to ident.
  
  =back
  
  =cut
  
  # Quotes an identifier for use in SQL, refusing empty/undefined input.
  #
  # Named arguments: string -- the identifier to quote.
  # Throws DatabaseError when the string is false (empty, undef or '0').
  sub quote_ident {
  	my ($self, %arg_hash) = @_;
  
  	$arg_hash{'string'}
  		or die('DatabaseError Nothing to ident.');
  
  	return $self->_quote_ident(%arg_hash);
  }
  
  # Default implementation: asks the server itself via quote_ident().
  # NOTE(review): the string is interpolated into the SQL unescaped, so
  # identifiers containing a single quote would break the statement --
  # callers appear to pass catalog-sourced names only; confirm.
  sub _quote_ident {
  	my ($self, %arg_hash) = @_;
  
  	my $row_list = $self->execute(
  		sql => "SELECT quote_ident('$arg_hash{'string'}')");
  
  	return $row_list->[0]->[0];
  }
  
  # Returns the current time. This package imports Time::HiRes's time()
  # (see the use line at the top of the module), so the value has
  # sub-second resolution. Presumably factored out so subclasses/tests
  # can override it -- TODO confirm.
  sub _time {
  	return time();
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<PgToolkit::Class>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_DATABASE

$fatpacked{"PgToolkit/Database/Dbi.pm"} = <<'PGTOOLKIT_DATABASE_DBI';
  package PgToolkit::Database::Dbi;
  
  use base qw(PgToolkit::Database);
  
  use strict;
  use warnings;
  
  =head1 NAME
  
  B<PgToolkit::Database::Dbi> - a DBI facade class.
  
  =head1 SYNOPSIS
  
  	my $database = PgToolkit::Database::Dbi->new(
  		driver => 'Pg', host => 'somehost', port => '5432',
  		dbname => 'somedb', user => 'someuser', password => 'secret',
  		set_hash => {'statement_timeout' => 0});
  
  	my $result = $database->execute(sql => 'SELECT * FROM sometable;');
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Database::Dbi> is a simplification of the DBI interface.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<driver>
  
  =item C<host>
  
  by default a socket connection is used,
  
  =item C<port>
  
  =item C<dbname>
  
  =item C<user>
  
  =item C<password>
  
  =item C<set_hash>
  
  a set of configuration parameters to set.
  
  =back
  
  For default argument values see the specific B<DBD::*> driver
  documentation.
  
  =head3 Throws
  
  =over 4
  
  =item C<DatabaseError>
  
  when either the DBI module or the specified driver not found.
  
  =back
  
  =cut
  
  # Loads DBI, verifies the requested driver is installed, connects, and
  # applies the optional set_hash configuration parameters.
  #
  # Throws DatabaseError when DBI or the driver is missing; DBI->connect
  # itself raises on connection failure (RaiseError => 1).
  sub init {
  	my ($self, %arg_hash) = @_;
  
  	$self->SUPER::init(%arg_hash);
  
  	$self->{'_driver'} = $arg_hash{'driver'};
  
  	# DBI is loaded lazily so the tool can fall back to psql when the
  	# module is not installed.
  	eval { require DBI; };
  	if ($@) {
  		die('DatabaseError DBI module not found.');
  	}
  
  	if (not grep($_ eq $arg_hash{'driver'}, DBI->available_drivers())) {
  		die('DatabaseError No driver found "'.$arg_hash{'driver'}.'".');
  	}
  
  	# Empty dbname/host/port parts fall back to the driver defaults
  	# (e.g. a local socket connection when no host is given).
  	$self->{'dbh'} = DBI->connect(
  		'dbi:'.$arg_hash{'driver'}.
  		':dbname='.($arg_hash{'dbname'} ? $self->_get_escaped_dbname() : '').
  		(defined $arg_hash{'host'} ? ';host='.$arg_hash{'host'} : '').
  		';port='.($arg_hash{'port'} or ''),
  		$arg_hash{'user'}, $arg_hash{'password'},
  		{
  			 RaiseError => 1, ShowErrorStatement => 1, AutoCommit => 1,
  			 PrintWarn => 0, PrintError => 0,
  			 pg_server_prepare => 0, pg_enable_utf8 => 0
  		});
  
  	# Apply session settings (e.g. statement_timeout) in one statement.
  	if ($arg_hash{'set_hash'}) {
  		$self->execute(
  			sql => join(
  				' ',
  				map(
  					'SET '.$_.' TO '.$arg_hash{'set_hash'}->{$_}.';',
  					keys %{$arg_hash{'set_hash'}})));
  	}
  
  	return;
  }
  
  =head1 METHODS
  
  =head2 B<execute()>
  
  Executes an SQL.
  
  =head3 Arguments
  
  =over 4
  
  =item C<sql>
  
  an SQL string.
  
  =back
  
  =head3 Returns
  
  An array of arrays representing the result.
  
  =head3 Throws
  
  =over 4
  
  =item C<DatabaseError>
  
  when the database raised an error during execution of the SQL.
  
  =back
  
  =cut
  
  # Executes an SQL string through DBI.
  #
  # Statements that produce a result set are prepared and fetched into
  # an array of arrays; everything else goes through do() and yields
  # undef. The SELECT detection is tolerant to leading whitespace and
  # case (the previous /^SELECT/ check silently discarded the result of
  # e.g. "  select ..." by routing it to do()).
  #
  # Throws DatabaseError wrapping the original DBI message.
  sub _execute {
  	my ($self, %arg_hash) = @_;
  
  	my $result;
  	eval {
  		if ($arg_hash{'sql'} =~ /^\s*SELECT\b/i) {
  			$self->{'sth'} = $self->{'dbh'}->prepare($arg_hash{'sql'});
  			$self->{'sth'}->execute();
  			$result = $self->{'sth'}->fetchall_arrayref();
  		} else {
  			$self->{'dbh'}->do($arg_hash{'sql'});
  		}
  	};
  	if ($@) {
  		die('DatabaseError '.$@);
  	}
  
  	return $result;
  }
  
  =head2 B<get_adapter_name()>
  
  Returns the name of the adapter.
  
  =head3 Returns
  
  A string representing the name.
  
  =cut
  
  # Reports this adapter as "DBI/<driver>", e.g. "DBI/Pg".
  sub get_adapter_name {
  	my ($self) = @_;
  
  	return 'DBI/'.$self->{'_driver'};
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<DBI>
  
  =item L<PgToolkit::Database>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_DATABASE_DBI

$fatpacked{"PgToolkit/Database/Psql.pm"} = <<'PGTOOLKIT_DATABASE_PSQL';
  package PgToolkit::Database::Psql;
  
  use base qw(PgToolkit::Database);
  
  use strict;
  use warnings;
  
  use IPC::Open3;
  
  =head1 NAME
  
  B<PgToolkit::Database::Psql> - a psql facade class.
  
  =head1 SYNOPSIS
  
  	my $database = PgToolkit::Database::Psql->new(
  		path => '/path/to/psql', host => 'somehost', port => '5432',
  		dbname => 'somedb', user => 'someuser',password => 'secret',
  		set_hash => {'statement_timeout' => 0});
  
  	my $result = $database->execute(sql => 'SELECT * FROM sometable;');
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Database::Psql> is a psql utility adaptation class.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<path>
  
  a path to psql, default 'psql'
  
  =item C<host>
  
  =item C<port>
  
  =item C<dbname>
  
  =item C<user>
  
  =item C<password>
  
  =item C<set_hash>
  
  a set of configuration parameters to set.
  
  =back
  
  For default argument values see the psql documentation.
  
  =head3 Throws
  
  =over 4
  
  =item C<DatabaseError>
  
  if can not run psql.
  
  =back
  
  =cut
  
  # Builds the psql command line from the connection arguments and
  # verifies it works by running a trivial statement.
  #
  # The password is passed via a PGPASSWORD= environment prefix; unset
  # arguments collapse to empty strings so psql falls back to its own
  # defaults. Session settings from set_hash are NOT applied here --
  # they are prepended to every statement in _execute(), because each
  # _run_psql() call spawns a fresh psql process.
  #
  # Throws DatabaseError when the smoke-test run fails.
  sub init {
  	my ($self, %arg_hash) = @_;
  
  	$self->SUPER::init(%arg_hash);
  
  	my %opt_hash = ();
  	$opt_hash{'password'} = (defined $arg_hash{'password'}) ?
  		'PGPASSWORD='.$arg_hash{'password'}.' ' : '';
  	$opt_hash{'path'} = (defined $arg_hash{'path'}) ?
  		$arg_hash{'path'} : 'psql';
  	$opt_hash{'host'} = (defined $arg_hash{'host'}) ?
  		'-h '.$arg_hash{'host'} : '';
  	$opt_hash{'port'} = (defined $arg_hash{'port'}) ?
  		'-p '.$arg_hash{'port'} : '';
  	$opt_hash{'dbname'} = (defined $arg_hash{'dbname'}) ?
  		'-d '.$self->_get_escaped_dbname() : '';
  	$opt_hash{'user'} = (defined $arg_hash{'user'}) ?
  		'-U '.$arg_hash{'user'} : '';
  
  	$self->{'_set_hash'} = $arg_hash{'set_hash'};
  
  	# -q -A -t -X: quiet, unaligned, tuples-only, no psqlrc;
  	# -P null="<NULL>" makes NULLs distinguishable when parsing output.
  	$self->{'_command'} = sprintf(
  		'%s%s -q -A -t -X %s %s %s %s -P null="<NULL>"',
  		@opt_hash{'password', 'path', 'host', 'port', 'dbname', 'user'});
  	$self->{'_command'} =~ s/\s+/ /g;
  
  	# Smoke test: make sure the command actually runs.
  	eval {
  		$self->_run_psql(
  			command => $self->{'_command'}, sql => 'SELECT 1;');
  	};
  	if ($@) {
  		if ($@ =~ 'DatabaseError') {
  			die('DatabaseError Can not run psql.');
  		} else {
  			die($@);
  		}
  	}
  
  	return;
  }
  
  =head1 METHODS
  
  =head2 B<execute()>
  
  Executes an SQL.
  
  =head3 Arguments
  
  =over 4
  
  =item C<sql>
  
  an SQL string.
  
  =back
  
  =head3 Returns
  
  An array of arrays representing the result.
  
  =head3 Throws
  
  =over 4
  
  =item C<DatabaseError>
  
  when problems appear during statement execution.
  
  =back
  
  =cut
  
  # Executes an SQL string through a freshly spawned psql process and
  # parses its unaligned output into an array of arrays.
  #
  # Each call prepends the SET statements from set_hash, because the
  # session does not survive between _run_psql() invocations. Cells
  # printed as "<NULL>" (see the -P null option in init()) become undef.
  #
  # NOTE(review): Perl's split() drops trailing empty fields, so rows
  # whose last cells are empty strings come back shorter than the
  # actual column count -- confirm callers tolerate this.
  sub _execute {
  	my ($self, %arg_hash) = @_;
  
  	my $sql = join(
  		' ',
  		map(
  			'SET '.$_.' TO '.$self->{'_set_hash'}->{$_}.';',
  			keys %{$self->{'_set_hash'}}),
  		$arg_hash{'sql'});
  
  	my $raw_data = $self->_run_psql(
  		command => $self->{'_command'}, sql => $sql);
  
  	my $result = [];
  	for my $row_data (split(qr/\n/, $raw_data)) {
  		my $row = [];
  		for my $cell_data (split(qr/\|/, $row_data)) {
  			my $cell = ($cell_data eq '<NULL>') ? undef : $cell_data;
  			push(@{$row}, $cell);
  		}
  		push(@{$result}, $row)
  	}
  
  	return $result;
  }
  
  =head2 B<get_adapter_name()>
  
  Returns the name of the adapter.
  
  =head3 Returns
  
  A string representing the name.
  
  =cut
  
  # This adapter identifies itself simply as "psql".
  sub get_adapter_name {
  	return 'psql';
  }
  
  # Feeds an SQL string to a psql child process and captures its output.
  #
  # Named arguments: command -- the psql command line, sql -- the text
  # to send on the child's stdin.
  #
  # Returns the child's stdout. Throws DatabaseError when psql exits
  # non-zero or reports an ERROR on stderr.
  sub _run_psql {
  	my ($self, %arg_hash) = @_;
  
  	my $pid = open3(\*CHLD_IN, \*CHLD_OUT, \*CHLD_ERR, $arg_hash{'command'});
  	print CHLD_IN $arg_hash{'sql'};
  	close CHLD_IN;
  
  	# Drain both pipes BEFORE waitpid(): a child blocked on writing to
  	# a full pipe buffer would otherwise never exit, deadlocking the
  	# waitpid() call (the previous code read the pipes after waitpid,
  	# and read CHLD_OUT twice on the error path).
  	my $out_output = join('', <CHLD_OUT>);
  	my $err_output = join('', <CHLD_ERR>);
  
  	waitpid($pid, 0);
  	my $exit_status = $? >> 8;
  
  	if ($exit_status or ($err_output and $err_output =~ /^ERROR: /)) {
  		die(join("\n", ('DatabaseError Can not execute the command',
  						$arg_hash{'command'}, $arg_hash{'sql'},
  						$out_output, $err_output)));
  	}
  
  	return $out_output;
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<IPC::Open3>
  
  =item L<PgToolkit::Database>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_DATABASE_PSQL

$fatpacked{"PgToolkit/DatabaseChooser.pm"} = <<'PGTOOLKIT_DATABASECHOOSER';
  package PgToolkit::DatabaseChooser;
  
  use strict;
  use warnings;
  
  =head1 NAME
  
  B<PgToolkit::DatabaseChooser> - a database factory class.
  
  =head1 SYNOPSIS
  
  	my $database = PgToolkit::DatabaseChooser->new(
  		constructor_list => [
  			sub { SomeDatabase->new(...); },
  			sub { AnotherDatabase->new(...); }]);
  
  =head1 DESCRIPTION
  
  B<PgToolkit::DatabaseChooser> a factory class for databases.
  
  It accepts a database constructor list and sequentially tries to run
  them. The first successful adapter is returned.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<constructor_list>
  
  =back
  
  =head3 Throws
  
  =over 4
  
  =item C<DatabaseChooserError>
  
  if can not find an adapter.
  
  =back
  
  =cut
  
  # Tries each supplied database constructor in order and returns the
  # first adapter that instantiates successfully. Constructors failing
  # with a DatabaseError are skipped; any other exception propagates.
  #
  # Throws DatabaseChooserError when no constructor succeeds.
  sub new {
  	my ($class, %arg_hash) = @_;
  
  	my $adapter;
  
  	CONSTRUCTOR:
  	for my $constructor (@{$arg_hash{'constructor_list'}}) {
  		$adapter = eval { $constructor->(); };
  		if (not $@) {
  			last CONSTRUCTOR;
  		}
  		if ($@ !~ 'DatabaseError') {
  			die($@);
  		}
  	}
  
  	if (not defined $adapter) {
  		die('DatabaseChooserError Can not find an adapter.');
  	}
  
  	return $adapter;
  }
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_DATABASECHOOSER

$fatpacked{"PgToolkit/Logger.pm"} = <<'PGTOOLKIT_LOGGER';
  package PgToolkit::Logger;
  
  use base qw(PgToolkit::Class);
  
  use strict;
  use warnings;
  
  =head1 NAME
  
  B<PgToolkit::Logger> - a logging facility class.
  
  =head1 SYNOPSIS
  
  	my $logger = PgToolkit::Logger->new(level => 'info');
  	$logger->write(message => 'Some message', level => 'warning');
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Logger> is a class implementing simple multilevel message
  logging logic.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<out_handle>
  
  an output filehandle, by default C<*STDOUT>
  
  =item C<err_handle>
  
  an error filehandle, by default C<*STDERR>, all the C<error> and
  C<warning> messages are written to this filehandle
  
  =item C<level>
  
  a minimum logging level, allowed symbols are C<error>, C<warning>,
  C<notice>, C<info>, C<debug0> and C<debug1>.
  
  =back
  
  =head3 Throws
  
  =over 4
  
  =item C<LoggerError>
  
  when wrong logging level is specified.
  
  =back
  
  =cut
  
  # Stores the output/error handles (defaulting to STDOUT/STDERR),
  # resolves the minimum logging level to its numeric code, enables
  # autoflush on both handles and validates the level.
  #
  # Throws LoggerError when the level name is unknown.
  sub init {
  	my ($self, %arg_hash) = @_;
  
  	$self->{'_out_handle'} =
  		exists $arg_hash{'out_handle'} ? $arg_hash{'out_handle'} : \*STDOUT;
  	$self->{'_err_handle'} =
  		exists $arg_hash{'err_handle'} ? $arg_hash{'err_handle'} : \*STDERR;
  	$self->{'_level_code'} = $self->_get_level_code(
  		level => $arg_hash{'level'});
  
  	# Setting autoflush
  	# (select() both handles in turn, set $| and restore the default).
  	{
  		my $default = select();
  		select($self->{'_out_handle'});
  		$| = 1;
  		select($self->{'_err_handle'});
  		$| = 1;
  		select($default);
  	}
  
  	# NOTE(review): when 'level' is undef this message concatenates an
  	# undefined value (warning under "use warnings") -- harmless, but
  	# worth confirming callers always pass a level.
  	if (not defined $self->{'_level_code'}) {
  		die('LoggerError Wrong logging level "'.$arg_hash{'level'}.
  			'" is specified in initialization.');
  	}
  
  	return;
  }
  
  =head1 METHODS
  
  =head2 B<write()>
  
  Logs a message.
  
  =head3 Arguments
  
  =over 4
  
  =item C<message>
  
  =item C<level>
  
  the level of the message, allowed symbols are C<error>, C<warning>,
  C<notice>, C<info>, C<debug0> and C<debug1>
  
  =item C<target>
  
  a name related to the log entry, empty by default.
  
  =back
  
  =head3 Throws
  
  =over 4
  
  =item C<LoggerError>
  
  when wrong logging level is specified.
  
  =back
  
  =cut
  
  # Logs a message when its level passes the configured threshold.
  # Messages with codes <= 0 (error, warning) go to the error handle;
  # everything else goes to the output handle. Each line is prefixed
  # with a timestamp, the optional target and the upper-cased level.
  #
  # Throws LoggerError for unknown level names.
  sub write {
  	my ($self, %arg_hash) = @_;
  
  	my $level_code = $self->_get_level_code(level => $arg_hash{'level'});
  
  	if (not defined $level_code) {
  		die('LoggerError Wrong logging level "'.$arg_hash{'level'}.
  			'" is specified in write.');
  	}
  
  	if ($level_code <= $self->{'_level_code'}) {
  		my $handle = ($level_code > 0) ?
  			$self->{'_out_handle'} : $self->{'_err_handle'};
  
  		my $target_prefix = (defined $arg_hash{'target'}) ?
  			$arg_hash{'target'}.' ' : '';
  
  		print {$handle} (
  			scalar(localtime()).' '.$target_prefix.
  			uc($arg_hash{'level'}).' '.$arg_hash{'message'}."\n");
  	}
  
  	return;
  }
  
  # Maps a symbolic level name to its numeric code; unknown names yield
  # undef so callers can detect and report them.
  sub _get_level_code {
  	my ($self, %arg_hash) = @_;
  
  	my %code_for = (
  		'error' => -1,
  		'warning' => 0,
  		'notice' => 1,
  		'info' => 2,
  		'debug0' => 3,
  		'debug1' => 4);
  
  	return $code_for{$arg_hash{'level'}};
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<PgToolkit::Class>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_LOGGER

$fatpacked{"PgToolkit/Options.pm"} = <<'PGTOOLKIT_OPTIONS';
  package PgToolkit::Options;
  
  use base qw(PgToolkit::Class);
  
  use strict;
  use warnings;
  
  use Getopt::Long qw(:config bundling no_ignore_case);
  use Pod::Usage ();
  
  =head1 NAME
  
  B<PgToolkit::Options> - a generic ad-hoc options parsing and processing
  class.
  
  =head1 SYNOPSIS
  
  	my $options = PgToolkit::Options->new(
  		definition_hash => {'quiet|q' => 0, 'verbosity|v:s' => 'notice'},
  		error_check_code => sub {
  			my $option_hash = shift;
  			return (
  				exists $option_hash->{'quiet'} and
  				exists $option_hash->{'verbosity'});
  		},
  		transform_code => sub {
  			my $option_hash = shift;
  			if (exists $option_hash->{'quiet'}) {
  				$option_hash->{'verbosity'} = 'warning';
  			}
  		});
  
  	my $verbosity = $options->get(name => 'verbosity');
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Options> encapsulates generic options, default values and
  interdependencies mechanisms. The 'help|?' option is implemented by
  default.
  
  =head3 Constructor arguments
  
  =over 4
  
  =item C<argv>
  
  an options listref, by default C<\@ARGV>
  
  =item C<out_handle>
  
  an output filehandle, by default C<*STDOUT>
  
  =item C<definition_hash>
  
  an option definitions as keys and default values as values
  
  =item C<error_check_code>
  
  an error checker code reference supplied with an option hash reference
  as argument and expected to return either error message or undef
  
  =item C<transform_code>
  
  if you need to do some manipulations with options do it inside this
  code.
  
  =back
  
  =head3 Throws
  
  =over 4
  
  =item OptionsError
  
  when an option definitions does not meet naming conditions.
  
  =back
  
  =cut
  
  # Parses command line options against definition_hash, applies the
  # error check and transform callbacks, and handles the built-in
  # help/man/version options (which print usage and normally exit).
  #
  # Throws OptionsError when a definition key has no '|' separator.
  sub init {
  	my ($self, %arg_hash) = @_;
  
  	$self->{'_out_handle'} =
  		exists $arg_hash{'out_handle'} ? $arg_hash{'out_handle'} : \*STDOUT;
  	$self->{'_argv'} =
  		exists $arg_hash{'argv'} ? $arg_hash{'argv'} : \@ARGV;
  
  	# Derive default values keyed by the long option name (the part of
  	# each definition before the first '|').
  	my $default_hash = {};
  	for my $key (keys %{$arg_hash{'definition_hash'}}) {
  		if ($key =~ /(.*?)\|/) {
  			$default_hash->{$1} = $arg_hash{'definition_hash'}->{$key};
  		} else {
  			die('OptionsError Wrong definition "'.$key.'".');
  		}
  	}
  
  	# Parse a local copy of the option list; Getopt::Long reports
  	# problems via warn, which we capture as the error message.
  	my $option_hash = {};
  	my $error;
  	{
  		local @ARGV = @{$self->{'_argv'}};
  
  		local $SIG{__WARN__} = sub {
  			$error = shift;
  			$error =~ s/\n//g;
  		};
  
  		Getopt::Long::GetOptions(
  			$option_hash, 'help|?', 'man|m', 'version|V',
  			(keys %{$arg_hash{'definition_hash'}}));
  	}
  
  	# NOTE(review): if $0 contains no '/', this match fails and $1
  	# keeps its previous value, leaving $prog_name wrong/undef --
  	# confirm the tool is always invoked via a path.
  	$0 =~ /.*\/(.*?)$/;
  	my $prog_name = $1;
  
  	# Custom error checking is skipped when help/man/version was asked.
  	if (
  		not (
  			exists $option_hash->{'help'} or exists $option_hash->{'man'} or
  			exists $option_hash->{'version'})) {
  		if (not $error and defined $arg_hash{'error_check_code'}) {
  			$error = $arg_hash{'error_check_code'}->($option_hash);
  		}
  
  		if ($error) {
  			$error = (
  				$prog_name.': '.$error."\n".'Try --help for short help, '.
  				'--man for full manual.');
  		}
  	}
  
  	if (defined $arg_hash{'transform_code'}) {
  		$arg_hash{'transform_code'}->($option_hash);
  	}
  
  	# Usage/manual/version output; _print_help() exits unless a custom
  	# out_handle was supplied (used by tests).
  	if ($error) {
  		$self->_print_help(
  			out_handle_specified => exists $arg_hash{'out_handle'},
  			result => 2,
  			message => $error);
  	} elsif ($option_hash->{'help'}) {
  		$self->_print_help(
  			out_handle_specified => exists $arg_hash{'out_handle'},
  			result => 1,
  			sections => 'NAME|SYNOPSIS');
  	} elsif ($option_hash->{'man'}) {
  		$self->_print_help(
  			out_handle_specified => exists $arg_hash{'out_handle'},
  			result => 1,
  			sections => (
  				'NAME|SYNOPSIS|DESCRIPTION|OPTIONS|'.
  				'LICENSE AND COPYRIGHT|VERSION|AUTHOR'));
  	} elsif ($option_hash->{'version'}) {
  		$self->_print_help(
  			out_handle_specified => exists $arg_hash{'out_handle'},
  			result => 1,
  			message => (
  				$prog_name.' ('.$arg_hash{'kit'}.') '.
  				$arg_hash{'version'}));
  	}
  
  	# Parsed values override the defaults; help/man/version default to 0.
  	$self->{'_option_hash'} = {
  		'help' => 0, 'man' => 0, 'version' => 0, %{$default_hash},
  		%{$option_hash}};
  
  	return;
  }
  
  # Prints usage information via Pod::Usage. When a custom out_handle
  # was supplied to init() the call does not exit ('NOEXIT'), otherwise
  # pod2usage() exits the process with the given result code.
  sub _print_help {
  	my ($self, %arg_hash) = @_;
  
  	my ($output, $exitval) =
  		$arg_hash{'out_handle_specified'} ?
  		($self->{'_out_handle'}, 'NOEXIT') :
  		(undef, $arg_hash{'result'});
  
  	# sections => ['_'] selects nothing, printing only the message.
  	Pod::Usage::pod2usage(
  		-message => $arg_hash{'message'},
  		-verbose => 99,
  		-output => $output,
  		-exitval => $exitval,
  		-sections => (
  			 exists $arg_hash{'sections'} ? $arg_hash{'sections'} : ['_']));
  }
  
  =head1 METHODS
  
  =head2 B<get()>
  
  Returns an option.
  
  =head3 Arguments
  
  =over 4
  
  =item C<name>
  
  =back
  
  =head3 Returns
  
  The value for the name.
  
  =head3 Throws
  
  =over 4
  
  =item C<CompactorOptionsError>
  
  when wrong name supplied in get.
  
  =back
  
  =cut
  
  # Returns the parsed value of the named option.
  # Throws OptionsError for names that were never defined.
  sub get {
  	my ($self, %arg_hash) = @_;
  
  	my $name = $arg_hash{'name'};
  
  	exists $self->{'_option_hash'}->{$name}
  		or die('OptionsError Wrong name "'.$name.'" is supplied in get.');
  
  	return $self->{'_option_hash'}->{$name};
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<Getopt::Long>
  
  =item L<PgToolkit::Class>
  
  =item L<Pod::Usage>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_OPTIONS

$fatpacked{"PgToolkit/Registry/Compactor.pm"} = <<'PGTOOLKIT_REGISTRY_COMPACTOR';
  package PgToolkit::Registry::Compactor;
  
  use base qw(PgToolkit::Class);
  
  use strict;
  use warnings;
  
  use PgToolkit::Compactor::Cluster;
  use PgToolkit::Compactor::Database;
  use PgToolkit::Compactor::Table;
  use PgToolkit::Database::Dbi;
  use PgToolkit::Database::Psql;
  use PgToolkit::DatabaseChooser;
  use PgToolkit::Logger;
  use PgToolkit::Options;
  
  =head1 NAME
  
  B<PgToolkit::Registry::Compactor> - a registry of the compactor components.
  
  =head1 SYNOPSIS
  
  	my $registry = PgToolkit::Registry::Compactor->new();
  
  	$registry->get_cluster_compactor()->process();
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Registry::Compactor> is a registry class that implements all the
  services and their relationships that compactor tool uses.
  
  =cut
  
  =head1 METHODS
  
  =head2 B<get_cluster_compactor()>
  
  A cluster compactor prototype service.
  
  =cut
  
  # Wires up a cluster-level compactor: database adapters and database
  # compactors are produced by closures over this registry, and the
  # relevant command line options are passed through.
  sub get_cluster_compactor {
  	my $self = shift;
  
  	my $options = $self->get_options();
  
  	return PgToolkit::Compactor::Cluster->new(
  		database_constructor => sub {
  			my %arg_hash = @_;
  			return $self->get_database_adapter(
  				dbname => $arg_hash{'dbname'});
  		},
  		logger => $self->get_logger(),
  		dry_run => $options->get(name => 'dry-run'),
  		database_compactor_constructor => sub {
  			my %arg_hash = @_;
  			return $self->get_database_compactor(
  				database => $arg_hash{'database'});
  		},
  		dbname_list => $options->get(name => 'dbname'),
  		excluded_dbname_list => $options->get(name => 'exclude-dbname'),
  		max_retry_count => $options->get(name => 'max-retry-count'));
  }
  
  =head2 B<get_database_compactor()>
  
  A database compactor prototype service.
  
  =cut
  
  # Wires up a database-level compactor around the given adapter; table
  # compactors are produced lazily through the supplied closure.
  #
  # NOTE(review): the constructor key is spelled 'no_pgstatuple'
  # (missing a 't'); verify PgToolkit::Compactor::Database expects the
  # same spelling before renaming it anywhere.
  sub get_database_compactor {
  	my ($self, %arg_hash) = @_;
  
  	my $options = $self->get_options();
  
  	return PgToolkit::Compactor::Database->new(
  		database => $arg_hash{'database'},
  		logger => $self->get_logger(),
  		dry_run => $options->get(name => 'dry-run'),
  		table_compactor_constructor => sub {
  			# This %arg_hash deliberately shadows the outer one with
  			# the arguments of the closure call.
  			my %arg_hash = @_;
  			return $self->get_table_compactor(
  				database => $arg_hash{'database'},
  				schema_name => $arg_hash{'schema_name'},
  				table_name => $arg_hash{'table_name'},
  				pgstattuple_schema_name => (
  					$arg_hash{'pgstattuple_schema_name'}));
  		},
  		schema_name_list => $options->get(name => 'schema'),
  		excluded_schema_name_list => $options->get(name => 'exclude-schema'),
  		table_name_list => $options->get(name => 'table'),
  		excluded_table_name_list => $options->get(name => 'exclude-table'),
  		no_pgstatuple => $options->get(name => 'no-pgstattuple'));
  }
  
  =head2 B<get_table_compactor()>
  
  A table compactor prototype service.
  
  =cut
  
  # Wires up a table-level compactor, mapping command line options to
  # constructor arguments and supplying the fixed tuning divisors and
  # thresholds used by the page-per-round/vacuum heuristics.
  sub get_table_compactor {
  	my ($self, %arg_hash) = @_;
  
  	my $options = $self->get_options();
  
  	return PgToolkit::Compactor::Table->new(
  		database => $arg_hash{'database'},
  		logger => $self->get_logger(),
  		dry_run => $options->get(name => 'dry-run'),
  		schema_name => $arg_hash{'schema_name'},
  		table_name => $arg_hash{'table_name'},
  		min_page_count => $options->get(name => 'min-page-count'),
  		min_free_percent => $options->get(name => 'min-free-percent'),
  		max_pages_per_round => $options->get(name => 'max-pages-per-round'),
  		no_initial_vacuum => $options->get(name => 'no-initial-vacuum'),
  		no_routine_vacuum => $options->get(name => 'no-routine-vacuum'),
  		no_final_analyze => $options->get(name => 'no-final-analyze'),
  		delay_constant => $options->get(name => 'delay-constant'),
  		delay_ratio => $options->get(name => 'delay-ratio'),
  		force => $options->get(name => 'force'),
  		reindex => $options->get(name => 'reindex'),
  		print_reindex_queries => $options->get(name => 'print-reindex-queries'),
  		progress_report_period => $options->get(
  			name => 'progress-report-period'),
  		pgstattuple_schema_name => $arg_hash{'pgstattuple_schema_name'},
  		pages_per_round_divisor => 1000,
  		pages_before_vacuum_lower_divisor => 16,
  		pages_before_vacuum_lower_threshold => 1000,
  		pages_before_vacuum_upper_divisor => 50,
  		max_retry_count => $options->get(name => 'max-retry-count'));
  }
  
  =head2 B<get_database_adapter()>
  
  A prototype service for a database adapter available on the system.
  
  =cut
  
  # Builds a database adapter for the given dbname, trying DBD::Pg,
  # then DBD::PgPP, then the psql command line utility -- the first
  # constructor that does not raise a DatabaseError wins (see
  # PgToolkit::DatabaseChooser).
  sub get_database_adapter {
  	my ($self, %arg_hash) = @_;
  
  	my $options = $self->get_options();
  
  	# Common connection parameters plus session settings applied by
  	# every adapter: asynchronous commits and replica role (the latter
  	# disables firing of non-replica triggers during compaction).
  	my %param_hash = (
  		dbname => $arg_hash{'dbname'},
  		host => $options->get(name => 'host'),
  		port => $options->get(name => 'port'),
  		user => $options->get(name => 'user'),
  		password => $options->get(name => 'password'),
  		set_hash => {
  			'synchronous_commit' => 'off',
  			'session_replication_role' => 'replica'});
  
  	my $constructor_list = [
  		sub {
  			return PgToolkit::Database::Dbi->new(
  				driver => 'Pg', %param_hash);
  		},
  		sub {
  			return PgToolkit::Database::Dbi->new(
  				driver => 'PgPP',%param_hash);
  		},
  		sub {
  			return PgToolkit::Database::Psql->new(
  				path => $options->get(name => 'path-to-psql'), %param_hash);
  		}];
  
  	return PgToolkit::DatabaseChooser->new(
  		constructor_list => $constructor_list);
  }
  
  =head2 B<get_logger()>
  
  A logger lazy loader service.
  
  =cut
  
  sub get_logger {
  	my $self = shift;
  
  	# Instantiate the logger on first request and cache it; the log
  	# level comes from the verbosity command line option.
  	unless (defined $self->{'_logger'}) {
  		my $verbosity = $self->get_options()->get(name => 'verbosity');
  		$self->{'_logger'} = PgToolkit::Logger->new(level => $verbosity);
  	}
  
  	return $self->{'_logger'};
  }
  
  =head2 B<get_options()>
  
  An options lazy loader service.
  
  =cut
  
  sub get_options {
  	my $self = shift;
  
  	# Lazily parse the command line options on first access and cache
  	# the resulting PgToolkit::Options object.
  	if (not defined $self->{'_options'}) {
  		$self->{'_options'} = PgToolkit::Options->new(
  			definition_hash => {
  				# connection
  				'host|h:s' => undef,
  				'port|p:i' => '5432',
  				# Default the user to the current system user. The
  				# capture goes into a lexical via a list-context match
  				# so a failed match yields undef instead of a stale $1.
  				'user|U:s' => do {
  					my ($user) = `whoami` =~ /(.*?)\n/;
  					$user;
  				},
  				'password|W:s' => undef,
  				'path-to-psql|P:s' => 'psql',
  				# target
  				'all|a:i' => 1,
  				'dbname|d:s@' => [],
  				'schema|n:s@' => [],
  				'table|t:s@' => [],
  				'exclude-dbname|D:s@' => [],
  				'exclude-schema|N:s@' => [],
  				'exclude-table|T:s@' => [],
  				# behaviour
  				'dry-run|u' => 0,
  				'no-initial-vacuum|I' => 0,
  				'no-routine-vacuum|R' => 0,
  				'no-final-analyze|L' => 0,
  				'no-pgstattuple|S' => 0,
  				'reindex|r' => 0,
  				'print-reindex-queries|s' => 0,
  				'force|f' => 0,
  				'max-pages-per-round|c:i' => 10,
  				'delay-constant|e:i' => 0,
  				'delay-ratio|E:i' => 2,
  				'max-retry-count|o:i' => 10,
  				'min-page-count|x:i' => 10,
  				'min-free-percent|y:i' => 20,
  				'progress-report-period|z:i' => 60,
  				# misc
  				'quiet|q' => 0,
  				'verbosity|v:s' => 'notice'},
  			error_check_code => sub {
  				my $option_hash = shift;
  
  				my $error;
  
  				# quiet and verbosity are mutually exclusive; at least
  				# one of all/dbname must be given to select targets.
  				if (exists $option_hash->{'quiet'} and
  					exists $option_hash->{'verbosity'})
  				{
  					$error = (
  						'These options can not be specified simultaneously: '.
  						'quiet, verbosity');
  				} elsif (
  					not exists $option_hash->{'all'} and
  					not exists $option_hash->{'dbname'})
  				{
  					$error = (
  						'At least one of the options must be specified: '.
  						'all, dbname');
  				}
  
  				return $error;
  			},
  			transform_code => sub {
  				my $option_hash = shift;
  
  				# quiet is a shorthand for warning-level verbosity
  				if (exists $option_hash->{'quiet'}) {
  					$option_hash->{'verbosity'} = 'warning';
  				}
  			},
  			kit => 'PgToolkit',
  			version => 'v1.0rc1');
  	}
  
  	return $self->{'_options'};
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<PgToolkit::Class>
  
  =item L<PgToolkit::Compactor::Cluster>
  
  =item L<PgToolkit::Compactor::Database>
  
  =item L<PgToolkit::Compactor::Schema>
  
  =item L<PgToolkit::Compactor::Table>
  
  =item L<PgToolkit::Database::Dbi>
  
  =item L<PgToolkit::Database::Psql>
  
  =item L<PgToolkit::DatabaseChooser>
  
  =item L<PgToolkit::Logger>
  
  =item L<PgToolkit::Options>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_REGISTRY_COMPACTOR

$fatpacked{"PgToolkit/Utils.pm"} = <<'PGTOOLKIT_UTILS';
  package PgToolkit::Utils;
  
  use base qw(PgToolkit::Class);
  
  use strict;
  use warnings;
  
  =head1 NAME
  
  B<PgToolkit::Utils> - a utility functions class.
  
  =head1 SYNOPSIS
  
  	my $utils = PgToolkit::Utils->new();
  	$utils->get_size_pretty(size => 12345);
  
  =head1 DESCRIPTION
  
  B<PgToolkit::Utils> is a class providing a set of functions solving
  different problems.
  
  =head1 METHODS
  
  =head2 B<get_size_pretty()>
  
  Converts size into human readable format. It works exactly like
  pg_size_pretty() works.
  
  =head3 Arguments
  
  =over 4
  
  =item C<size>
  
  =back
  
  =head3 Returns
  
  A string representing the size in bytes, kB, MB, GB or TB.
  
  =cut
  
  sub get_size_pretty {
  	my ($self, %arg_hash) = @_;
  
  	my $size = $arg_hash{'size'};
  
  	# Same unit ladder and 10240 promotion threshold that
  	# pg_size_pretty() uses.
  	my @unit_list = ('bytes', 'kB', 'MB', 'GB', 'TB');
  	my $limit = 10 * 1024;
  
  	my $unit_index = 0;
  	while ($unit_index < $#unit_list and
  		int(abs($size) + 0.5) >= $limit)
  	{
  		$size /= 1024;
  		$unit_index++;
  	}
  
  	# Round half away from zero, keeping the original sign.
  	my $rounded = int(abs($size) + 0.5);
  	my $sign = $size < 0 ? -1 : 1;
  
  	return $sign * $rounded.' '.$unit_list[$unit_index];
  }
  
  =head1 SEE ALSO
  
  =over 4
  
  =item L<PgToolkit::Class>
  
  =back
  
  =head1 LICENSE AND COPYRIGHT
  
  Copyright (c) 2012, PostgreSQL-Consulting.com
  
  =head1 AUTHOR
  
  =over 4
  
  =item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>
  
  =back
  
  =cut
  
  1;
PGTOOLKIT_UTILS

# Strip the two-space indentation that protected the embedded module
# sources inside their heredoc bodies above.
s/^  //mg for values %fatpacked;

# Install an @INC hook: when require/use asks for a packed file, hand
# back an in-memory filehandle onto its source instead of letting Perl
# search the filesystem. Returning an empty list falls through to the
# regular @INC entries for everything else.
unshift @INC, sub {
  if (my $fat = $fatpacked{$_[1]}) {
    open my $fh, '<', \$fat
      or die "FatPacker error loading $_[1] (could be a perl installation issue?)";
    return $fh;
  }
  return
};

} # END OF FATPACK CODE
#!/usr/bin/perl

use strict;
use warnings;

use PgToolkit::Registry::Compactor;

=head1 NAME

B<pgcompactor> - a PostgreSQL bloat reducing tool.

=head1 SYNOPSIS

pgcompactor [OPTION...]

=over 4

=item General options:

[-?mV] [(-q | -v LEVEL)]

=item Connection options:

[-h HOST] [-p PORT] [-U USER] [-W PASSWD] [-P PATH]

=item Targeting options:

(-a | -d DBNAME...) [-n SCHEMA...] [-t TABLE...] [-D DBNAME...]
[-N SCHEMA...] [-T TABLE...]


=item Behavioural options:

[-IRLSrsfu] [-c PAGES] [-e SECONDS] [-E RATIO] [-o COUNT] [-x PAGES]
[-y RATIO] [-z SECONDS]

=back

=head1 DESCRIPTION

B<pgcompactor> is a maintenance tool for PostgreSQL to reduce bloat
for tables without heavily locking them.

The tool is an automation of the solutions described on the pages:

=over 4

=item L<Reducing bloat without locking|http://blog.endpoint.com/2010/09/reducing-bloat-without-locking.html>

by Joshua Tolley (on End Point's blog)

=item L<Reduce bloat of table without long/exclusive locks|http://depesz.com/index.php/2010/10/17/reduce-bloat-of-table-without-longexclusive-locks>

by Hubert Lubaczewski (aka Depesz).

=back

If pgstattuple is installed B<pgcompactor> uses it to gather better
statistics. It is highly recommended if there is a lot of toast data
in the database.

=head1 OPTIONS

=head2 General options

=over 4

=item B<-?>

=item B<--help>

Display short help.

=item B<-m>

=item B<--man>

Display full manual.

=item B<-V>

=item B<--version>

Print version.

=item B<-q>

=item B<--quiet>

Do not display progress messages. The same as C<-v warning>.

=item B<-v> LEVEL

=item B<--verbosity> LEVEL

A verbosity level. One of C<error>, C<warning>, C<notice>, C<info>,
C<debug0> and C<debug1>. By default C<notice>.

=back

=head2 Connection options

The B<pgcompactor> tries to connect to the database with the DBI
Perl module using either DBD::Pg or DBD::PgPP driver first. If it
fails the utility tries to work via psql.

=over 4

=item B<-h> HOST

=item B<--host> HOST

A database host. By default C<localhost>.

=item B<-p> PORT

=item B<--port> PORT

A database port. By default C<5432>.

=item B<-U> USER

=item B<--user> USER

A database user. By default current system user is used (as returned
by whoami).

=item B<-W> PASSWD

=item B<--password> PASSWD

A password for the user.

=item B<-P> PATH

=item B<--path-to-psql> PATH

A path to the psql program. By default C<psql>.

=back

=head2 Targeting options

Note that if you specified a database, schema or table that is not in
the cluster it will be ignored. Redundant exclusions will be ignored
too. All these options except C<--all> can be specified several times.

=over 4

=item B<-a>

=item B<--all>

Process all databases of the cluster.

=item B<-d> DBNAME

=item B<--dbname> DBNAME

A database to process. By default all the user databases of the
instance are processed.

=item B<-D> DBNAME

=item B<--exclude-dbname> DBNAME

A database to exclude from processing.

=item B<-n> SCHEMA

=item B<--schema> SCHEMA

A schema to process. By default all the schemas of the specified
database are processed.

=item B<-N> SCHEMA

=item B<--exclude-schema> SCHEMA

A schema to exclude from processing.

=item B<-t> TABLE

=item B<--table> TABLE

A table to process. By default all the tables of the specified schema
are processed.

=item B<-T> TABLE

=item B<--exclude-table> TABLE

A table to exclude from processing.

=back

=head2 Options controlling the behaviour

=over 4

=item B<-u>

=item B<--dry-run>

Print statistics only without affecting data.

=item B<-I>

=item B<--no-initial-vacuum>

=item B<-R>

=item B<--no-routine-vacuum>

Turn off initial/routine vacuum. By default all the vacuums are
on. Final vacuum can not be turned off.

=item B<-L>

=item B<--no-final-analyze>

Turn off final analyze. By default final analyze is performed.

=item B<-S>

=item B<--no-pgstattuple>

Do not use pgstattuple even if it is installed. By default is off.

=item B<-r>

=item B<--reindex>

Reindex tables after processing.

=item B<-s>

=item B<--print-reindex-queries>

Print reindex queries. Useful if you want to perform manual
reindex later.

=item B<-f>

=item B<--force>

Try to compact even those tables that do not meet minimal bloat
requirements.

=item B<-c> PAGES

=item B<--max-pages-per-round> PAGES

An upper threshold of pages to process per round. By default it is 10.

=item B<-e> SECONDS

=item B<--delay-constant> SECONDS

A constant part of the delay between rounds in seconds. By default it
is 0.

=item B<-E> RATIO

=item B<--delay-ratio> RATIO

A dynamic part of the delay between rounds is calculated as
previous-round-time * delay-ratio. By default 2.

=item B<-o> COUNT

=item B<--max-retry-count> COUNT

A maximum number of retries in case of unsuccessful processing. By
default 10.

=item B<-x> COUNT

=item B<--min-page-count> COUNT

A minimum number of pages required to be in a table or index for
processing. By default it is 10.

=item B<-y> RATIO

=item B<--min-free-percent> RATIO

A minimum free space percent required to be in a table or index for
processing. By default it is 20.

=item B<-z> SECONDS

=item B<--progress-report-period> SECONDS

An interval in seconds to report the progress of processing
with. Default is 60 seconds.

=back

=cut

sub main {
	# Build the compactor object graph via the registry and run a full
	# cluster compacting pass.
	my $registry = PgToolkit::Registry::Compactor->new();

	$registry->get_cluster_compactor()->process();
}

# Run only when executed as a script, not when loaded for testing.
main() if __PACKAGE__ eq 'main';

1;

=head1 LICENSE AND COPYRIGHT

Copyright (c) 2012, PostgreSQL-Consulting.com

=head1 AUTHOR

=over 4

=item L<Maxim Boguk|mailto:maxim.boguk@postgresql-consulting.com>

=item L<Sergey Konoplev|mailto:sergey.konoplev@postgresql-consulting.com>

=back

=cut
