-- Show current locks joined to the sessions holding them.
-- NOTE(review): uses pre-9.2 catalog columns (procpid, current_query,
-- pg_locks.transaction); on modern PostgreSQL these are pid / query /
-- transactionid.
SELECT
    a.datname,
    c.relname,
    l.transaction,
    l.mode,
    l.granted,
    a.usename,
    substr(a.current_query, 1, 30) AS query_snippet,
    a.query_start,
    age(now(), a.query_start) AS "age",
    a.procpid
FROM pg_locks l
LEFT JOIN pg_class c
    ON l.relation = c.oid          -- relation is NULL for non-table locks
INNER JOIN pg_stat_activity a
    ON l.pid = a.procpid
ORDER BY a.query_start;
Showing posts with label tip. Show all posts
Showing posts with label tip. Show all posts
Saturday, November 7, 2009
How to find locks in Postgres?
The pg_locks table is very useful...
Friday, November 6, 2009
Update massive number of records
A simple PL/SQL procedure which helps to do a massive update.
CREATE OR REPLACE PROCEDURE move_column AS
    -- Copies the recid -> attrib_value_id mapping from very_big_table into
    -- another_very_big_table.rec_id, updating 10 000 rows per round trip.
    --
    -- The original version bulk-collected the WHOLE source table into PGA
    -- memory before batching, which can exhaust memory on a very big table.
    -- A cursor fetched with BULK COLLECT ... LIMIT bounds memory to one
    -- batch while keeping the same end state and commit-per-batch behavior.
    c_batch_size CONSTANT PLS_INTEGER := 10000;

    TYPE num_table IS TABLE OF NUMBER INDEX BY PLS_INTEGER;
    recid_table num_table;
    avid_table  num_table;

    CURSOR src_cur IS
        SELECT recid, attrib_value_id
        FROM very_big_table;
BEGIN
    OPEN src_cur;
    LOOP
        -- Fetch at most one batch of rows into the collections.
        FETCH src_cur BULK COLLECT INTO recid_table, avid_table
            LIMIT c_batch_size;
        EXIT WHEN recid_table.COUNT = 0;

        -- One round trip updates the whole batch.
        FORALL i IN 1 .. recid_table.COUNT
            UPDATE another_very_big_table
            SET rec_id = recid_table(i)
            WHERE id = avid_table(i);

        -- Intermittent commit per batch, as in the original; a failure
        -- mid-run leaves earlier batches committed.
        COMMIT;
    END LOOP;
    CLOSE src_cur;
END move_column;
Friday, October 23, 2009
A simple C++ program to save a file to (MongoDB) GridFS

MongoDB C++ Tutorial http://www.mongodb.org/pages/viewpage.action?pageId=133415
The best start is building MongoDB from source (Ubuntu|Debian).
$sudo apt-get install g++ scons libpcre++-dev libmozjs-dev libpcap-dev libboost-dev
$cd /usr/src
$sudo git clone git://github.com/mongodb/mongo.git
$cd mongo
$scons
$scons --prefix=/opt/mongo install
$cd ~
$gvim test_gridfs.cpp
#include <iostream> #include <vector> #include <boost/algorithm/string.hpp> #include <mongo/client/dbclient.h> #include <mongo/client/gridfs.h> // g++ tutorial.cpp -lmongoclient -lboost_thread -lboost_filesystem -o tutorial using namespace std; using namespace mongo; int main(int argc, const char **argv) { const char *fileName = ""; std::vector<std::string> strs; if (argc != 2) { cerr << "Usage " << argv[0] << " local_file " << endl; return -12; } fileName = argv[1]; //to generate gridfs file name boost::split(strs, fileName, boost::is_any_of("/")); DBClientConnection c; c.connect("localhost"); cout << "connected ok" <<endl; GridFS gfs = GridFS(c, "test", "testcpp"); gfs.storeFile(fileName, strs[strs.size()-1]); cout << "file stored" << endl; return 0; }
g++ -o file_to_gridfs.o -c -I/opt/mongo/include file_to_gridfs.cpp
g++ -o file_to_gridfs file_to_gridfs.o -L/opt/mongo/lib -lmongoclient -lboost_thread -lboost_filesystem
Tuesday, October 20, 2009
mongoDB gridfs and sharding
If you want to use GridFS with sharded chunks, note that the example contains a mistake: sharding by "_id" doesn't work.
Working example.
There are at least two sharding strategies: by file_id or by (n, id).
Sharding by file_id gives no RAID, or RAID-1 (at the file level); sharding by n, when you use many servers, can behave like RAID-0 or RAID-10. At the collection level performance is always RAID-0 / RAID-10 ;-)
Working example.
First .... http://www.mongodb.org/display/DOCS/A+Sample+Configuration+Session and then ... $mongo > use admin switched to db admin > db.runCommand( { shardcollection : "test.dexters.chunks", key : { n : 1 } } ) {"collectionsharded" : "test.dexters.chunks" , "ok" : 1} $vim test_load.py ==================================================== #!/usr/bin/env python import sys import os.path from pymongo.connection import Connection from gridfs import GridFS connection = Connection("localhost", 27017) db = connection["test"] name = os.path.basename(sys.argv[1]) fs = GridFS(db) fp = fs.open(name, 'w', 'dexters') for l in open(sys.argv[1]): fp.write(l) fp.close() ====================================================
There are at least two sharding strategies: by file_id or by (n, id).
Sharding by file_id gives no RAID, or RAID-1 (at the file level); sharding by n, when you use many servers, can behave like RAID-0 or RAID-10. At the collection level performance is always RAID-0 / RAID-10 ;-)
Monday, October 19, 2009
Problem with Oracle Enterprise manager
If you can't run Oracle Enterprise manager ...
You must change to the Apache log directory and delete all files.
That's all ...
$ ./opmnctl startall
ias-component/process-type/process-set:
HTTP_Server/HTTP_Server/HTTP_Server/
Error
--> Process (index=1,uid=186798,pid=9999)
failed to start a managed process after the maximum retry limit
$less Log:
/usr/home/oracle/product/10.1.3/OracleAS_1/opmn/logs/HTTP_Server~1
ias-component/process-type/process-set:
OC4J/home/default_group/
Error
--> Process (index=1,uid=1867980864,pid=450578)
time out while waiting for a managed process to start
Log:
/usr/home/oracle/product/10.1.3/OracleAS_1/opmn/logs/OC4J~home~default_group~1
You must change to the Apache log directory and delete all files.
$cd ../../Apache/Apache/logs
$rm *
That's all ...
Friday, October 16, 2009
How to drop all tables from database using one SQL query

How to drop all tables from a database using one SQL query, without destroying the database? It's impossible in PostgreSQL, MySQL, or Oracle. But we can generate the SQL.
#MySQL mysql> SELECT CONCAT('DROP TABLE ', TABLE_NAME,' ;') FROM information_schema.TABLES WHERE TABLE_SCHEMA='mysql'; +----------------------------------------+ | CONCAT('DROP TABLE ', TABLE_NAME,' ;') | +----------------------------------------+ | DROP TABLE columns_priv ; | | DROP TABLE db ; | | DROP TABLE func ; | | DROP TABLE help_category ; | | DROP TABLE help_keyword ; | | DROP TABLE help_relation ; | | DROP TABLE help_topic ; | | DROP TABLE host ; | | DROP TABLE proc ; | | DROP TABLE procs_priv ; | | DROP TABLE tables_priv ; | | DROP TABLE time_zone ; | | DROP TABLE time_zone_leap_second ; | | DROP TABLE time_zone_name ; | | DROP TABLE time_zone_transition ; | | DROP TABLE time_zone_transition_type ; | | DROP TABLE user ; | +----------------------------------------+ 17 rows in set (0.02 sec) Execute drop commands many times becouse of foreign keys ;-) #Postgresql SELECT 'DROP TABLE '||c.relname ||' CASCADE;' FROM pg_catalog.pg_class c JOIN pg_catalog.pg_roles r ON r.oid = c.relowner LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r','') AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid) ORDER BY 1; ?column? ------------------------------------------------- DROP TABLE auth_group_permissions CASCADE; DROP TABLE auth_group CASCADE; DROP TABLE auth_message CASCADE; DROP TABLE auth_permission CASCADE; DROP TABLE auth_user_groups CASCADE; ... #Oracle SELECT 'DROP TABLE '||table_name||';' FROM user_tables; 'DROPTABLE'||TABLE_NAME||';' ------------------------------------------ DROP TABLE SYS_EXPORT_SCHEMA_01; DROP TABLE MON_NO_CHECK; DROP TABLE REWRITE_TABLE;
Subscribe to:
Posts (Atom)