Compare commits


2 Commits

Author        SHA1         Message                    Date
Stuart Lynn   0e24d542b3   renaming to fix typo       2016-03-07 12:55:25 -05:00
Stuart Lynn   79bd319366   bayesian_blocks function   2016-03-07 11:49:57 -05:00
14 changed files with 145 additions and 339 deletions

View File

@@ -1,187 +0,0 @@
# PostgreSQL GIS stack
#
# This image includes the following tools
# - PostgreSQL 9.5
# - PostGIS 2.2 with raster, topology and sfcgal support
# - OGR Foreign Data Wrapper
# - PgRouting
# - PDAL master
# - PostgreSQL PointCloud version master
#
# Version 1.7
FROM phusion/baseimage
MAINTAINER Vincent Picavet, vincent.picavet@oslandia.com
# Set correct environment variables.
ENV HOME /root
# Regenerate SSH host keys. baseimage-docker does not contain any, so you
# have to do that yourself. You may also comment out this instruction; the
# init system will auto-generate one during boot.
RUN /etc/my_init.d/00_regen_ssh_host_keys.sh
# Use baseimage-docker's init system.
CMD ["/sbin/my_init"]
RUN apt-get update && apt-get install -y wget ca-certificates
# Use APT postgresql repositories for 9.5 version
RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ wheezy-pgdg main 9.5" > /etc/apt/sources.list.d/pgdg.list && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
# packages needed for compilation
RUN apt-get update
RUN apt-get install -y autoconf build-essential cmake docbook-mathml docbook-xsl libboost-dev libboost-thread-dev libboost-filesystem-dev libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev libcunit1-dev libgdal-dev libgeos++-dev libgeotiff-dev libgmp-dev libjson0-dev libjson-c-dev liblas-dev libmpfr-dev libopenscenegraph-dev libpq-dev libproj-dev libxml2-dev postgresql-server-dev-9.5 xsltproc git build-essential wget
RUN add-apt-repository ppa:fkrull/deadsnakes &&\
apt-get update &&\
apt-get install -y python3.2
# application packages
RUN apt-get install -y postgresql-9.5 postgresql-plpython-9.5
# Download and compile CGAL
RUN wget https://gforge.inria.fr/frs/download.php/file/32994/CGAL-4.3.tar.gz &&\
tar -xzf CGAL-4.3.tar.gz &&\
cd CGAL-4.3 &&\
mkdir build && cd build &&\
cmake .. &&\
make -j3 && make install
# orig sfcgal method
# download and compile SFCGAL
# RUN git clone https://github.com/Oslandia/SFCGAL.git
# RUN cd SFCGAL && cmake . && make -j3 && make install
# # cleanup
# RUN rm -Rf SFCGAL
# andrewxhill fix for stable sfcgal version
RUN wget https://github.com/Oslandia/SFCGAL/archive/v1.2.0.tar.gz
RUN tar -xzf v1.2.0.tar.gz
RUN cd SFCGAL-1.2.0 && cmake . && make -j 1 && make install
RUN rm -Rf v1.2.0.tar.gz SFCGAL-1.2.0
# download and install GEOS 3.5
RUN wget http://download.osgeo.org/geos/geos-3.5.0.tar.bz2 &&\
tar -xjf geos-3.5.0.tar.bz2 &&\
cd geos-3.5.0 &&\
./configure && make && make install &&\
cd .. && rm -Rf geos-3.5.0 geos-3.5.0.tar.bz2
# Download and compile PostGIS
RUN wget http://download.osgeo.org/postgis/source/postgis-2.2.0.tar.gz
RUN tar -xzf postgis-2.2.0.tar.gz
RUN cd postgis-2.2.0 && ./configure --with-sfcgal=/usr/local/bin/sfcgal-config --with-geos=/usr/local/bin/geos-config
RUN cd postgis-2.2.0 && make && make install
# cleanup
RUN rm -Rf postgis-2.2.0.tar.gz postgis-2.2.0
# Download and compile pgrouting
RUN git clone https://github.com/pgRouting/pgrouting.git &&\
cd pgrouting &&\
mkdir build && cd build &&\
cmake -DWITH_DOC=OFF -DWITH_DD=ON .. &&\
make -j3 && make install
# cleanup
RUN rm -Rf pgrouting
# Download and compile ogr_fdw
RUN git clone https://github.com/pramsey/pgsql-ogr-fdw.git &&\
cd pgsql-ogr-fdw &&\
make && make install &&\
cd .. && rm -Rf pgsql-ogr-fdw
# Compile PDAL
RUN git clone https://github.com/PDAL/PDAL.git pdal
RUN mkdir PDAL-build && \
cd PDAL-build && \
cmake ../pdal && \
make -j3 && \
make install
# cleanup
RUN rm -Rf pdal && rm -Rf PDAL-build
# Compile PointCloud
RUN git clone https://github.com/pramsey/pointcloud.git
RUN cd pointcloud && ./autogen.sh && ./configure && make -j3 && make install
# cleanup
RUN rm -Rf pointcloud
RUN git clone https://github.com/CartoDB/cartodb-postgresql.git &&\
cd cartodb-postgresql &&\
make all install &&\
cd .. && rm -Rf cartodb-postgresql
# install pip
RUN apt-get -y install python-dev python-pip liblapack-dev gfortran libyaml-dev
RUN pip install numpy pandas scipy theano keras sklearn
RUN pip install pysal
# get compiled libraries recognized
RUN ldconfig
# clean packages
# all -dev packages
# RUN apt-get remove -y --purge autotools-dev libgeos-dev libgif-dev libgl1-mesa-dev libglu1-mesa-dev libgnutls-dev libgpg-error-dev libhdf4-alt-dev libhdf5-dev libicu-dev libidn11-dev libjasper-dev libjbig-dev libjpeg8-dev libjpeg-dev libjpeg-turbo8-dev libkrb5-dev libldap2-dev libltdl-dev liblzma-dev libmysqlclient-dev libnetcdf-dev libopenthreads-dev libp11-kit-dev libpng12-dev libpthread-stubs0-dev librtmp-dev libspatialite-dev libsqlite3-dev libssl-dev libstdc++-4.8-dev libtasn1-6-dev libtiff5-dev libwebp-dev libx11-dev libx11-xcb-dev libxau-dev libxcb1-dev libxcb-dri2-0-dev libxcb-dri3-dev libxcb-glx0-dev libxcb-present-dev libxcb-randr0-dev libxcb-render0-dev libxcb-shape0-dev libxcb-sync-dev libxcb-xfixes0-dev libxdamage-dev libxdmcp-dev libxerces-c-dev libxext-dev libxfixes-dev libxshmfence-dev libxxf86vm-dev linux-libc-dev manpages-dev mesa-common-dev libgcrypt11-dev unixodbc-dev uuid-dev x11proto-core-dev x11proto-damage-dev x11proto-dri2-dev x11proto-fixes-dev x11proto-gl-dev x11proto-input-dev x11proto-kb-dev x11proto-xext-dev x11proto-xf86vidmode-dev xtrans-dev zlib1g-dev
# installed packages
# RUN apt-get remove -y --purge autoconf build-essential cmake docbook-mathml docbook-xsl libboost-dev libboost-filesystem-dev libboost-timer-dev libcgal-dev libcunit1-dev libgdal-dev libgeos++-dev libgeotiff-dev libgmp-dev libjson0-dev libjson-c-dev liblas-dev libmpfr-dev libopenscenegraph-dev libpq-dev libproj-dev libxml2-dev postgresql-server-dev-9.5 xsltproc git build-essential wget
# additional compilation packages
# RUN apt-get remove -y --purge automake m4 make
# ---------- SETUP --------------
# add a baseimage PostgreSQL init script
RUN mkdir /etc/service/postgresql
ADD postgresql.sh /etc/service/postgresql/run
# Adjust PostgreSQL configuration so that remote connections to the
# database are possible.
RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.5/main/pg_hba.conf
# And add ``listen_addresses`` to ``/etc/postgresql/9.5/main/postgresql.conf``
RUN echo "listen_addresses='*'" >> /etc/postgresql/9.5/main/postgresql.conf
# Expose PostgreSQL
EXPOSE 5432
# Add VOLUMEs to allow backup of config, logs and databases
VOLUME ["/data", "/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"]
# Add pip
# http://bugs.python.org/issue19846
# > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK.
ENV LANG C.UTF-8
# add database setup upon image start
ADD pgpass /root/.pgpass
RUN chmod 700 /root/.pgpass
RUN mkdir -p /etc/my_init.d
ADD init_db_script.sh /etc/my_init.d/init_db_script.sh
ADD init_db.sh /root/init_db.sh
ADD run_tests.sh /root/run_tests.sh
ADD run_server.sh /root/run_server.sh
# ---------- Final cleanup --------------
#
# Clean up APT when done.
# RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

View File

@@ -7,34 +7,6 @@ CartoDB Spatial Analysis extension for PostgreSQL.
* *pg* contains the PostgreSQL extension source code
* *python* Python module
## Running with Docker
Crankshaft comes with a Dockerfile to build and run a sandboxed machine for testing
and development.
First you have to build the docker container

    docker build -t crankshaft .

To run the pg tests run

    docker run -it --rm -v $(pwd):/crankshaft crankshaft /root/run_tests.sh

If there are failures it will dump the reason to the screen.
To run a server you can develop on, run

    docker run -it --rm -v $(pwd):/crankshaft -p $(docker-machine ip default):5432:5432 crankshaft /root/run_server.sh

and connect from your host using

    psql -U pggis -h $(docker-machine ip default) -p 5432 -W

The password is `pggis`.
## Requirements
* pip

View File

@@ -1,77 +0,0 @@
#!/bin/bash
# wait for pg server to be ready
echo "Waiting for PostgreSQL to run..."
sleep 1
while ! /usr/bin/pg_isready -q
do
    sleep 1
    echo -n "."
done
# PostgreSQL running
echo "PostgreSQL running, initializing database."
# PostgreSQL user
#
# create postgresql user pggis
/sbin/setuser postgres /usr/bin/psql -c "CREATE USER pggis with SUPERUSER PASSWORD 'pggis';"
/sbin/setuser postgres /usr/bin/psql -c "CREATE role publicuser;"
# == Auto restore dumps ==
#
# If we find any PostgreSQL dumps in /data/restore, load each one
# into a new database.
shopt -s nullglob
for f in /data/restore/*.backup
do
    echo "Found database dump to restore: $f"
    DBNAME=$(basename -s ".backup" "$f")
    echo "Creating a new database $DBNAME..."
    /usr/bin/psql -U pggis -h localhost -c "CREATE DATABASE $DBNAME WITH OWNER = pggis ENCODING = 'UTF8' TEMPLATE = template0 CONNECTION LIMIT = -1;" postgres
    /usr/bin/psql -U pggis -h localhost -w -c "CREATE EXTENSION citext; CREATE EXTENSION pg_trgm; CREATE EXTENSION btree_gist; CREATE EXTENSION hstore; CREATE EXTENSION fuzzystrmatch; CREATE EXTENSION unaccent; CREATE EXTENSION postgres_fdw; CREATE EXTENSION pgcrypto; CREATE EXTENSION plpythonu; CREATE EXTENSION postgis; CREATE EXTENSION postgis_topology; CREATE EXTENSION pgrouting; CREATE EXTENSION pointcloud; CREATE EXTENSION pointcloud_postgis; CREATE EXTENSION postgis_sfcgal; drop type if exists texture; create type texture as (url text,uv float[][]);CREATE ROLE publicuser;" $DBNAME
    # /usr/bin/psql -U pggis -h localhost -w -f /usr/share/postgresql/9.5/contrib/postgis-2.1/sfcgal.sql -d $DBNAME
    echo "Restoring database $DBNAME..."
    /usr/bin/pg_restore -U pggis -h localhost -d $DBNAME -w "$f"
    echo "Creating public user"
    /usr/bin/psql -U pggis -h localhost -w -c "CREATE ROLE publicuser;"
    echo "Restore done."
done
# == Auto restore SQL backups ==
#
# If we find any PostgreSQL SQL scripts in /data/restore, load each one
# into a new database.
shopt -s nullglob
for f in /data/restore/*.sql
do
    echo "Found database SQL dump to restore: $f"
    DBNAME=$(basename -s ".sql" "$f")
    echo "Creating a new database $DBNAME..."
    /usr/bin/psql -U pggis -h localhost -c "CREATE DATABASE $DBNAME WITH OWNER = pggis ENCODING = 'UTF8' TEMPLATE = template0 CONNECTION LIMIT = -1;" postgres
    /usr/bin/psql -U pggis -h localhost -w -c "CREATE EXTENSION citext; CREATE EXTENSION pg_trgm; CREATE EXTENSION btree_gist; CREATE EXTENSION hstore; CREATE EXTENSION fuzzystrmatch; CREATE EXTENSION unaccent; CREATE EXTENSION postgres_fdw; CREATE EXTENSION pgcrypto; CREATE EXTENSION plpythonu; CREATE EXTENSION postgis; CREATE EXTENSION postgis_topology; CREATE EXTENSION postgis_sfcgal; CREATE EXTENSION pgrouting; CREATE EXTENSION pointcloud; CREATE EXTENSION pointcloud_postgis; drop type if exists texture; create type texture as (url text,uv float[][]);" $DBNAME
    # /usr/bin/psql -U pggis -h localhost -w -f /usr/share/postgresql/9.5/contrib/postgis-2.1/sfcgal.sql -d $DBNAME
    echo "Restoring database $DBNAME..."
    /usr/bin/psql -U pggis -h localhost -d $DBNAME -w -f "$f"
    echo "Restore done."
done
# == create new database pggis ==
echo "Creating a new empty database..."
# create user and main database
/usr/bin/psql -U pggis -h localhost -c "CREATE DATABASE pggis WITH OWNER = pggis ENCODING = 'UTF8' TEMPLATE = template0 CONNECTION LIMIT = -1;" postgres
# activate all needed extension in pggis database
/usr/bin/psql -U pggis -h localhost -w -c "CREATE EXTENSION citext; CREATE EXTENSION pg_trgm; CREATE EXTENSION btree_gist; CREATE EXTENSION hstore; CREATE EXTENSION fuzzystrmatch; CREATE EXTENSION unaccent; CREATE EXTENSION postgres_fdw; CREATE EXTENSION pgcrypto; CREATE EXTENSION plpythonu; CREATE EXTENSION postgis; CREATE EXTENSION postgis_topology; CREATE EXTENSION postgis_sfcgal; CREATE EXTENSION pgrouting; CREATE EXTENSION pointcloud; CREATE EXTENSION pointcloud_postgis; drop type if exists texture;
create type texture as (url text,uv float[][]);" pggis
#/usr/bin/psql -U pggis -h localhost -w -f /usr/share/postgresql/9.5/contrib/postgis-2.1/sfcgal.sql -d pggis
echo "Database initialized. Connect from host with :"
echo "psql -h localhost -p <PORT> -U pggis -W pggis"
echo "Get <PORT> value with 'docker ps'"

View File

@@ -1,3 +0,0 @@
#!/bin/sh
# Script for my_init.d, so as to run database init without blocking
/root/init_db.sh &

View File

@@ -137,6 +137,53 @@ BEGIN
END;
$$
LANGUAGE plpgsql VOLATILE;
CREATE OR REPLACE FUNCTION
cdb_create_segment (
    segment_name TEXT,
    table_name TEXT,
    column_name TEXT,
    geoid_column TEXT DEFAULT 'geoid',
    census_table TEXT DEFAULT 'block_groups'
)
RETURNS NUMERIC
AS $$
    from crankshaft.segmentation import create_segment
    # TODO: use named parameters or a dictionary
    return create_segment('table')
$$ LANGUAGE plpythonu;
CREATE OR REPLACE FUNCTION
cdb_predict_segment (
    segment_name TEXT,
    geoid_column TEXT DEFAULT 'geoid',
    census_table TEXT DEFAULT 'block_groups'
)
RETURNS TABLE(geoid TEXT, prediction NUMERIC)
AS $$
    from crankshaft.segmentation import create_segment
    # TODO: use named parameters or a dictionary
    return create_segment('table')
$$ LANGUAGE plpythonu;
CREATE OR REPLACE FUNCTION
cdb_adaptive_histogram (
    table_name TEXT,
    column_name TEXT
)
RETURNS TABLE (bin_start numeric, bin_end numeric, value numeric)
AS $$
    from crankshaft.bayesian_blocks import adaptive_histogram
    return adaptive_histogram(table_name, column_name)
$$ LANGUAGE plpythonu;
CREATE OR REPLACE FUNCTION
cdb_simple_test ()
RETURNS NUMERIC
AS $$
    return 5
$$ LANGUAGE plpythonu;
-- Make sure by default there are no permissions for publicuser
-- NOTE: this happens at extension creation time, as part of an implicit transaction.
-- REVOKE ALL PRIVILEGES ON SCHEMA cdb_crankshaft FROM PUBLIC, publicuser CASCADE;
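A quick, hedged way to exercise these new entry points from a client script (a sketch only: the `pggis` connection defaults come from this repo's Docker setup, `my_table` and `my_value_column` are placeholder names, and depending on how the extension is installed the functions may need schema-qualifying, e.g. `cdb_crankshaft.cdb_simple_test()`):

    # Hypothetical client-side check of the new functions. Connection details
    # mirror the pggis defaults used elsewhere in this changeset; the demo
    # table and column names are placeholders, not part of this changeset.
    import psycopg2

    conn = psycopg2.connect(host="localhost", port=5432,
                            dbname="pggis", user="pggis", password="pggis")
    cur = conn.cursor()

    # Scalar sanity check
    cur.execute("SELECT cdb_simple_test();")
    print(cur.fetchone())  # expected: (Decimal('5'),)

    # Adaptive histogram over an existing numeric column
    cur.execute("SELECT * FROM cdb_adaptive_histogram('my_table', 'my_value_column');")
    for bin_start, bin_end, value in cur.fetchall():
        print(bin_start, bin_end, value)

    conn.close()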

View File

@@ -0,0 +1,11 @@
CREATE OR REPLACE FUNCTION
cdb_adaptive_histogram (
    table_name TEXT,
    column_name TEXT
)
RETURNS TABLE (bin_start numeric, bin_end numeric, value numeric)
AS $$
    from crankshaft.bayesian_blocks import adaptive_histogram
    return adaptive_histogram(table_name, column_name)
$$ LANGUAGE plpythonu;

pgpass
View File

@@ -1 +0,0 @@
localhost:5432:*:pggis:pggis

View File

@@ -1,5 +0,0 @@
#!/bin/sh
# `/sbin/setuser postgres` runs the given command as the user `postgres`.
# If you omit that part, the command will be run as root.
rm -rf /etc/ssl/private-copy; mkdir /etc/ssl/private-copy; mv /etc/ssl/private/* /etc/ssl/private-copy/; rm -r /etc/ssl/private; mv /etc/ssl/private-copy /etc/ssl/private; chmod -R 0700 /etc/ssl/private; chown -R postgres /etc/ssl/private
exec /sbin/setuser postgres /usr/lib/postgresql/9.5/bin/postgres -D /var/lib/postgresql/9.5/main -c config_file=/etc/postgresql/9.5/main/postgresql.conf >> /var/log/postgresql.log 2>&1

View File

@@ -1,2 +1,3 @@
import random_seeds
import clustering
import bayesian_blocks

View File

@@ -0,0 +1 @@
from bayesian_blocks import *

View File

@@ -0,0 +1,84 @@
import plpy
import numpy as np


def adaptive_histogram(table_name, column_name):
    data = plpy.execute("select {column_name} from {table_name}".format(**locals()))
    # rows come back keyed by the selected column name
    data = [float(d[column_name]) for d in data]
    plpy.notice(data)
    vals, bins = np.histogram(data, bins=_bayesian_blocks(data))
    # one row per bin: (bin_start, bin_end, value), matching the SQL signature
    return zip(bins[:-1], bins[1:], vals)


def _bayesian_blocks(t):
    """Bayesian Blocks Implementation

    By Jake Vanderplas. License: BSD
    Based on algorithm outlined in http://adsabs.harvard.edu/abs/2012arXiv1207.5578S

    Parameters
    ----------
    t : ndarray, length N
        data to be histogrammed

    Returns
    -------
    bins : ndarray
        array containing the (N+1) bin edges

    Notes
    -----
    This is an incomplete implementation: it may fail for some
    datasets. Alternate fitness functions and prior forms can
    be found in the paper listed above.
    """
    # copy and sort the array
    t = np.sort(t)
    N = t.size

    # create length-(N + 1) array of cell edges
    edges = np.concatenate([t[:1],
                            0.5 * (t[1:] + t[:-1]),
                            t[-1:]])
    block_length = t[-1] - edges

    # arrays needed for the iteration
    nn_vec = np.ones(N)
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)

    # -----------------------------------------------------------------
    # Start with first data cell; add one cell at each iteration
    # -----------------------------------------------------------------
    for K in range(N):
        # Compute the width and count of the final bin for all possible
        # locations of the K^th changepoint
        width = block_length[:K + 1] - block_length[K + 1]
        count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]

        # evaluate fitness function for these possibilities
        fit_vec = count_vec * (np.log(count_vec) - np.log(width))
        fit_vec -= 4  # 4 comes from the prior on the number of changepoints
        fit_vec[1:] += best[:K]

        # find the max of the fitness: this is the K^th changepoint
        i_max = np.argmax(fit_vec)
        last[K] = i_max
        best[K] = fit_vec[i_max]

    # -----------------------------------------------------------------
    # Recover changepoints by iteratively peeling off the last block
    # -----------------------------------------------------------------
    change_points = np.zeros(N, dtype=int)
    i_cp = N
    ind = N
    while True:
        i_cp -= 1
        change_points[i_cp] = ind
        if ind == 0:
            break
        ind = last[ind - 1]
    change_points = change_points[i_cp:]

    return edges[change_points]
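For orientation, here is a standalone sketch (pure NumPy, no plpy, so it runs outside the database) of the row shape `adaptive_histogram` is expected to return; the evenly spaced edges below merely stand in for the `_bayesian_blocks(data)` output:

    # Standalone illustration of the (bin_start, bin_end, value) rows.
    # Fixed, evenly spaced edges stand in for _bayesian_blocks(data) here.
    import numpy as np

    data = np.random.gamma(shape=2.0, scale=1.0, size=500)
    edges = np.linspace(data.min(), data.max(), 11)  # placeholder for _bayesian_blocks(data)
    vals, bins = np.histogram(data, bins=edges)

    # One tuple per bin, matching the SQL signature
    # RETURNS TABLE (bin_start numeric, bin_end numeric, value numeric).
    rows = list(zip(bins[:-1], bins[1:], vals))
    for bin_start, bin_end, value in rows[:3]:
        print(bin_start, bin_end, value)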

View File

@@ -40,7 +40,7 @@ setup(
# The choice of component versions is dictated by what's
# provisioned in the production servers.
- install_requires=['pysal==1.11.0','numpy==1.6.1','scipy==0.17.0'],
+ install_requires=['pysal==1.11.0','numpy==1.10.1','scipy==0.17.0'],
requires=['pysal', 'numpy'],

View File

@@ -1,14 +0,0 @@
#!/bin/bash
/sbin/my_init &
echo "Waiting for PostgreSQL to run..."
sleep 1
while ! /usr/bin/pg_isready -q
do
    sleep 1
    echo -n "."
done
cd /crankshaft/pg
make install
fg

View File

@@ -1,23 +0,0 @@
#!/bin/bash
/sbin/my_init &
echo "Waiting for PostgreSQL to run..."
sleep 1
while ! /usr/bin/pg_isready -q
do
    sleep 1
    echo -n "."
done
cd /crankshaft/pg
make install
PGUSER=pggis PGPASSWORD=pggis PGHOST=localhost make installcheck
if [ "$?" -eq "0" ]
then
    echo "PASSED"
else
    cat /crankshaft/pg/test/0.0.1/regression.diffs
fi