Database notes (MongoDB, Postgres, MySQL)

Executing a MongoDB query from the command line

mongo -u 'MyUsername' -p 'MyPassword' --eval 'db.getCollection("fs.files").find({"_id": ObjectId("ffffffffffffffffffffffff")});' '127.0.0.1:27017/MyDBName'

Show Postgres locks (who is blocking whom)

https://wiki.postgresql.org/wiki/Lock_Monitoring

SELECT blocked_locks.pid AS blocked_pid,
       blocked_activity.usename AS blocked_user,
       blocking_locks.pid AS blocking_pid,
       blocking_activity.usename AS blocking_user,
       blocked_activity.query AS blocked_statement,
       blocking_activity.query AS current_statement_in_blocking_process
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks
    ON blocking_locks.locktype = blocked_locks.locktype
    AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
    AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
    AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
    AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
    AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
    AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
    AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
    AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
    AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
    AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.granted;
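
Once the query above gives you a blocking_pid, the blocker can be dealt with from SQL. A minimal sketch (12345 stands in for the blocking_pid you found):

-- Cancel the blocker's current query but keep its connection open
SELECT pg_cancel_backend(12345);
-- Or terminate the whole session (more drastic)
SELECT pg_terminate_backend(12345);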

Check all table sizes in Postgres

The tables view lives in information_schema; pg_class is one of the System Catalogs.


SELECT ist.table_name, reltuples AS "entries", pg_size_pretty(relpages::bigint*8*1024) AS size
FROM information_schema.tables ist
INNER JOIN pg_class c ON (ist.table_name = c.relname)
WHERE ist.table_catalog = 'ip2_sync_master'
  AND ist.table_schema = 'public'
ORDER BY relpages DESC, ist.table_name ASC;
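
Note that reltuples and relpages are planner estimates refreshed by VACUUM/ANALYZE, and the relpages*8*1024 arithmetic assumes the default 8 kB block size. For exact on-disk sizes (including indexes and TOAST) pg_total_relation_size can be used instead; a minimal sketch:

SELECT relname AS table_name,
       pg_size_pretty(pg_total_relation_size(oid)) AS total_size
FROM pg_class
WHERE relkind = 'r'  -- ordinary tables only; also matches system catalogs, filter by schema if needed
ORDER BY pg_total_relation_size(oid) DESC;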

Synchronize a Postgres table with bash and CSV

Note that this export uses psql's unaligned output (-A -F","), which does not quote or escape commas inside field values; a CSV-safe variant is sketched after the script.

#!/bin/bash
DIR='/root/sql_dump'
mkdir -p "$DIR"
cd "$DIR"

SIZE=100000
#START=611244350
START=0
END=$((START + SIZE))
STOP=189097000
TABLNAME="schema.tablename"
while [[ $START -lt $STOP ]] && [[ $END -le $STOP ]]; do
    # Source side
    export PGPASSWORD="PASSWORD#1"
    echo "Read from SOURCE start $START end $END"
    FILE="${DIR}/dump${START}.csv"
    # Assignment and use must be separate statements: with `SQL="..." psql -c "$SQL"`
    # the "$SQL" expansion happens before the temporary assignment takes effect.
    SQL="SELECT * FROM ${TABLNAME} WHERE id > $START AND id <= $END"
    psql -U postgres -h SOURCEHOST -d DB1 -t -A -F"," -c "$SQL" > "$FILE"

    # Destination side
    export PGPASSWORD="PASSWORD#1"
    SQL="DELETE FROM ${TABLNAME} WHERE id > $START AND id <= $END"
    psql -U postgres -h localhost -d DB2 -c "$SQL"
    psql -U postgres -h localhost -d DB2 -c "\copy ${TABLNAME} FROM '${FILE}' DELIMITER ',' CSV"
    echo "Injected to DESTINATION"

    START=$END
    END=$((START + SIZE))
    if [ $END -gt $STOP ]; then
        END=$STOP
    fi
    rm "$FILE"
done

Export and import a Postgres query to CSV


# ===========================================================
# Export to CSV
# ===========================================================
# export PGPASSWORD="YOURPASSWORD"
# psql -U YOURUSERNAME -h YOURHOSTNAME -d YOURDBNAME -t -A -F"," -c "select * from YOURTABLENAME limit 2" > output.csv

# ===========================================================
# Import
# ===========================================================
# export PGPASSWORD="YOURPASSWORD"
# psql -U YOURUSERNAME -h YOURHOSTNAME -d YOURDBNAME -c "\copy YOURTABLENAME FROM '/tmp/dump.csv' DELIMITER ',' CSV"

Postgres SSD optimization

If your database runs on an SSD instead of a spinning disk, random reads cost nearly the same as sequential ones, so you can tell the planner by bringing random_page_cost down close to seq_page_cost:


-- Change the tablespace costs (defaults: seq_page_cost = 1.0, random_page_cost = 4.0)
ALTER TABLESPACE pg_default SET ( seq_page_cost = 1, random_page_cost = 1.1 );
-- Verify the change
SELECT spcname, spcoptions FROM pg_tablespace;

-- Undo the tablespace costs
ALTER TABLESPACE pg_default RESET ( seq_page_cost, random_page_cost );
-- Verify the change
SELECT spcname, spcoptions FROM pg_tablespace;
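
To see whether the change actually flips a plan, compare EXPLAIN output before and after (YOUR_TABLE and YOUR_INDEXED_COLUMN are placeholders):

-- With a low random_page_cost the planner should pick an index scan
-- instead of a sequential scan for selective predicates.
EXPLAIN SELECT * FROM YOUR_TABLE WHERE YOUR_INDEXED_COLUMN = 42;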

Check which database objects depend on (reference) your table


SELECT R.TABLE_NAME
FROM INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE U
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS FK
    ON U.CONSTRAINT_CATALOG = FK.UNIQUE_CONSTRAINT_CATALOG
    AND U.CONSTRAINT_SCHEMA = FK.UNIQUE_CONSTRAINT_SCHEMA
    AND U.CONSTRAINT_NAME = FK.UNIQUE_CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE R
    ON R.CONSTRAINT_CATALOG = FK.CONSTRAINT_CATALOG
    AND R.CONSTRAINT_SCHEMA = FK.CONSTRAINT_SCHEMA
    AND R.CONSTRAINT_NAME = FK.CONSTRAINT_NAME
WHERE
    -- THIS IS WHERE YOUR INTEREST IS (YOUR TABLE)
    U.COLUMN_NAME = 'THE_COLUMN_NAME'
    AND U.TABLE_CATALOG = 'THE_DB_NAME'
    AND U.TABLE_SCHEMA = 'THE_SCHEMA_NAME'
    AND U.TABLE_NAME = 'THE_TABLE_NAME';
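
On Postgres specifically, the same answer comes straight out of pg_constraint (contype = 'f' marks foreign keys; placeholders as above):

-- All foreign keys that point at your table
SELECT conname AS fk_name,
       conrelid::regclass AS referencing_table
FROM pg_constraint
WHERE contype = 'f'
  AND confrelid = 'THE_SCHEMA_NAME.THE_TABLE_NAME'::regclass;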

Postgres function to divide and conquer (iterate over) a big query

Create a Postgres function that copies a big table in fixed-size batches instead of one huge INSERT ... SELECT:


TRUNCATE TABLE YOUR_NEW_BIG_TABLE;

DROP FUNCTION IF EXISTS pg_iterator();

CREATE OR REPLACE FUNCTION pg_iterator()
RETURNS void AS
$BODY$
DECLARE
    vTotal  INT;
    vLimit  INT;
    vOffset INT;
BEGIN
    -- How many rows to copy?
    -- SELECT count(*) INTO vTotal FROM YOUR_BIG_TABLE; -- 346,879,848
    vTotal  := 346879848;
    -- vTotal := 5000;
    vLimit  := 100000;
    vOffset := 0;

    RAISE NOTICE 'DEBUG vTotal : %', vTotal;
    RAISE NOTICE 'DEBUG vLimit : %', vLimit;
    RAISE NOTICE 'DEBUG vOffset: %', vOffset;

    LOOP
        EXIT WHEN vOffset >= vTotal;
        -- Copy one batch of vLimit rows
        EXECUTE format('INSERT INTO YOUR_NEW_BIG_TABLE (id, language, hash) SELECT id, language, hash FROM YOUR_BIG_TABLE ORDER BY id LIMIT %s OFFSET %s;', vLimit, vOffset);
        RAISE NOTICE 'DEBUG vOffset: %', vOffset;
        vOffset := vOffset + vLimit;
    END LOOP;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;

ALTER FUNCTION pg_iterator() OWNER TO postgres;

SELECT pg_iterator();
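
Note that OFFSET re-scans all the skipped rows, so each batch gets slower than the last. If id is indexed, a keyset variant keeps every batch cheap: the loop would track the last copied id instead of an offset. A sketch of one batch under that assumption:

-- Keyset pagination: continue from the last id already copied
INSERT INTO YOUR_NEW_BIG_TABLE (id, language, hash)
SELECT id, language, hash
FROM YOUR_BIG_TABLE
WHERE id > 123456   -- last id from the previous batch
ORDER BY id
LIMIT 100000;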

Add a user in MySQL

CREATE USER 'YOUR_USER_NAME'@'CONNECTING_FROM_WHERE' IDENTIFIED BY 'THIS_USER_PASSWORD';
GRANT ALL PRIVILEGES ON *.* TO 'YOUR_USER_NAME'@'CONNECTING_FROM_WHERE' WITH GRANT OPTION;
FLUSH PRIVILEGES;
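
ALL PRIVILEGES ON *.* WITH GRANT OPTION makes the account effectively a superuser; for an application account a scoped grant is usually safer. A sketch with hypothetical names:

CREATE USER 'app_user'@'10.0.0.%' IDENTIFIED BY 'APP_PASSWORD';
-- Only DML on one database, no admin rights
GRANT SELECT, INSERT, UPDATE, DELETE ON YOUR_DB.* TO 'app_user'@'10.0.0.%';
FLUSH PRIVILEGES;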