Tuesday, November 19, 2019

Connect to a guest with virt-viewer

virt-viewer -c qemu+ssh://root@server.com/system guestname

Sunday, December 28, 2014

virt-install on OVH SoYouStart

virt-install --virt-type kvm --name mysupername --ram 1024 \
  --cdrom=/var/lib/libvirt/images/ubuntu-12.04.5-server-amd64.iso \
  --disk /home/mysupername.qcow2,format=qcow2 \
  --network bridge:br0 \
  --vnc --vncport 65322 \
  --os-type=linux --accelerate
#host /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# eth0 no longer has an address of its own, it is enslaved to the bridge below
#auto eth0
#iface eth0 inet static
#       address 188.165.217.113
#       netmask 255.255.255.0
#       network 188.165.217.0
#       broadcast 188.165.217.255
#       gateway 188.165.217.254

auto br0
iface br0 inet static
        address 188.165.217.113
        netmask 255.255.255.0
        broadcast 188.165.217.255
        gateway 188.165.217.254
        bridge_ports eth0
        bridge_fd 9
        bridge_hello 2
        bridge_maxage 12
        bridge_stp off
# virtual /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# OVH / SoYouStart failover IP setup: the guest gets its failover IP as a /32
# and reaches the host's gateway through explicit on-link routes.
# hwaddress must be the virtual MAC assigned to this failover IP in the manager.
auto eth0
iface eth0 inet static
        hwaddress 02:13:11:65:8b:29
        address x.y.z.s
        broadcast x.y.z.s
        netmask 255.255.255.255
        post-up route add 188.165.217.254 dev eth0
        post-up route add default gw 188.165.217.254
        post-down route del 188.165.217.254 dev eth0
        post-down route del default gw 188.165.217.254
        dns-nameservers 8.8.8.8

Monday, December 13, 2010

PostgreSQL tricks

ROWNUM in PostgreSQL

http://stackoverflow.com/questions/3959692/rownum-in-postgresql/3959748#3959748
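
A minimal sketch of the idea from that answer: emulate Oracle's ROWNUM with the row_number() window function (the table and column names here are just placeholders):

SELECT row_number() OVER (ORDER BY id) AS rownum, t.*
FROM some_table t;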


Reverse index
http://stackoverflow.com/questions/3927532/is-it-possible-to-index-select-domain-name-from-domains-where-x-example-com-li/3927783#3927783
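
A sketch of the usual trick for that case: index the reversed domain name, so a suffix match like '%.example.com' becomes a left-anchored LIKE that can use the index (assumes a domains table with a name column; reverse() is built in from PostgreSQL 9.1, older versions need a small helper function):

CREATE INDEX domains_name_rev_idx ON domains (reverse(name) text_pattern_ops);

SELECT name
FROM domains
WHERE reverse(name) LIKE reverse('.example.com') || '%';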

Wednesday, August 18, 2010

How to remove duplicates from a table (MySQL)

CREATE TABLE table_users (
    id INT PRIMARY KEY,
    username TEXT
);

DELETE FROM table_users
USING table_users, table_users AS vtable
WHERE table_users.id > vtable.id
  AND table_users.username = vtable.username;

The condition table_users.id > vtable.id keeps only the row with the lowest id for each duplicated username and deletes the rest.
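
Once the duplicates are gone, a unique index keeps them from coming back (a sketch; the prefix length is needed because username is TEXT):

ALTER TABLE table_users ADD UNIQUE INDEX uq_username (username(255));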

Thursday, July 15, 2010

Cassandra file storage

Cassandra doesn't support big blobs, and there is no ready equivalent of GridFS (MongoDB). It's important to keep in mind that Cassandra is written in Java and the Thrift API has no streaming, so every value has to fit in memory.
http://wiki.apache.org/cassandra/FAQ#large_file_and_blob_storage

We have to split big files into multiple chunks.


(file_name => (size))
(chunk_id  => (data))
(file_name => (chunk_id, chunk_id, ...))

Example of storing a file in Python (lazyboy):

# -*- coding: utf-8 -*-
# <Keyspaces>
#     <Keyspace Name="BigStorage">
#        <ColumnFamily CompareWith="BytesType" Name="Files"/>
#        <ColumnFamily CompareWith="BytesType" Name="Chunks"/>
#        <ColumnFamily CompareWith="TimeUUIDType" Name="FilesChunks"/>
#     </Keyspace>
# </Keyspaces>
#

import sys
import uuid

from lazyboy import *
from lazyboy.key import Key

# Define your cluster(s)
connection.add_pool('BigStorage', ['10.10.2.29:9160'])

CHUNK_SIZE = 1024 * 512


class FileKey(Key):
    def __init__(self, key=None):
        Key.__init__(self, "BigStorage", "Files", key)

class File(record.Record):
    _required = ('size',)

    def __init__(self, *args, **kwargs):
        record.Record.__init__(self, *args, **kwargs)
        self.key = FileKey()

class ChunkKey(Key):
    def __init__(self, key=None):
        Key.__init__(self, "BigStorage", "Chunks", key)

class Chunk(record.Record):
    _required = ('data',)

    def __init__(self, *args, **kwargs):
        record.Record.__init__(self, *args, **kwargs)
        self.key = ChunkKey()

class FileChunkKey(Key):
    def __init__(self, key=None):
        Key.__init__(self, "BigStorage", "FilesChunks", key)

class FileChunk(record.Record):
    # Anything in here _must_ be set before the object is saved
    #_required = ('data',)

    def __init__(self, *args, **kwargs):
        """Initialize the record, along with a new key."""
        record.Record.__init__(self, *args, **kwargs)
        self.key = FileChunkKey()


def store_file(file_name, file_object):
    chunk_keys = []
    file_size = 0

    # Create the file record first, marked as not completely stored yet
    new_file = File()
    new_file.key = FileKey(file_name)
    new_file.update({'size': 0, 'stored': 0})
    new_file.save()

    # Read the file in CHUNK_SIZE pieces and store each piece as its own row
    while True:
        data = file_object.read(CHUNK_SIZE)
        if not data:
            break

        file_size += len(data)
        chunk = Chunk({'data': data})
        key = str(uuid.uuid1())
        chunk.key = ChunkKey(key)
        chunk_keys.append(key)
        chunk.save()
        print key

    # Record the chunk order under the file name; TimeUUID column names
    # keep the chunks sorted in insertion order
    for chunk_key in chunk_keys:
        file_chunk = FileChunk()
        file_chunk.update({uuid.uuid1().bytes: chunk_key})
        file_chunk.key = FileChunkKey(file_name)
        file_chunk.save()

    # Mark the file as completely stored
    new_file.update({'size': file_size, 'stored': 1})
    new_file.save()
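
Example of reading the file back in Java (jassandra client); it walks the FilesChunks row for the file and writes each chunk to a local file: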

package eu.iddqd.casstorage;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.softao.jassandra.ByteArray;
import org.softao.jassandra.ConsistencyLevel;
import org.softao.jassandra.DriverManager;
import org.softao.jassandra.IColumn;
import org.softao.jassandra.IColumnFamily;
import org.softao.jassandra.IConnection;
import org.softao.jassandra.ICriteria;
import org.softao.jassandra.IKeySpace;
import org.softao.jassandra.JassandraException;

public class CasStorage {

    public static void main(String[] args) {
        Properties info = new Properties();
        info.put(DriverManager.CONSISTENCY_LEVEL,
                ConsistencyLevel.ONE.toString());

        try {
            IConnection connection = DriverManager.getConnection(
                    "thrift://127.0.0.1:9160", info);
            IKeySpace keySpace = connection.getKeySpace("BigStorage");
            IColumnFamily cfFilesChunks = keySpace.getColumnFamily("FilesChunks");
            IColumnFamily cfChunks = keySpace.getColumnFamily("Chunks");

            ICriteria criteria = cfFilesChunks.createCriteria();
            ICriteria chunksCriteria = cfChunks.createCriteria();

            // args[1] = file name stored in Cassandra, args[2] = local output path
            String fileName = args[1];
            criteria.keyList(fileName).columnRange(ByteArray.EMPTY, ByteArray.EMPTY, Integer.MAX_VALUE);

            // Column names are TimeUUIDs, so the chunk keys come back in insertion order
            Map<String, List<IColumn>> map = criteria.select();
            List<IColumn> list = map.get(fileName);
            FileOutputStream out = new FileOutputStream(args[2]);

            for (int i = 0; i < list.size(); i++) {
                String chunkKey = new String(list.get(i).getValue().toByteArray());
                chunksCriteria.keyList(chunkKey)
                        .columnRange(ByteArray.EMPTY, ByteArray.EMPTY, Integer.MAX_VALUE);
                Map<String, List<IColumn>> chunkMap = chunksCriteria.select();
                out.write(chunkMap.get(chunkKey).get(0).getValue().toByteArray());
            }

            out.close();

        } catch (JassandraException e) {
            e.printStackTrace();
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
    }
}


Wednesday, April 14, 2010

Which SQL query do you think is the worst for the disks?

Based on a script from milek.blogspot.com.

#!/usr/sbin/dtrace -qs
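/*
 * Usage (assumed invocation; needs DTrace privileges, the script name is just an example):
 *   ./mysql_writes.d <how long to trace, e.g. 60s> <write-count threshold> <mysqld pid>
 * $1 = tick interval after which the script exits, $2 = minimum number of
 * write() calls a query must trigger to be reported, $3 = pid of mysqld.
 */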

#pragma D option strsize=8192


pid$3::*mysql_parse*:entry
{
    self->a = 1;
    self->query = copyinstr(arg1);
    self->count = 0;
}

pid$3::*mysql_parse*:return
/ self->a && self->count > $2 /
{
    printf("### write() count: %d ###\n%s\n\n", self->count, self->query);

    self->a = 0;
    self->query = 0;
}

pid$3::*mysql_parse*:return
/ self->a /
{
    self->a = 0;
    self->query = 0;
}

/* count write() syscalls issued by the thread while the query is running */
syscall::*write*:entry
/ self->a /
{
    self->count++;
}

tick-$1
{
    exit(0);
}

Wednesday, February 10, 2010

drop schema on oracle

-- 1. Lock the account so nobody can open new sessions
ALTER USER p$prodschema ACCOUNT LOCK;

-- 2. Disconnect the sessions that are still open
--    (no trailing semicolon inside the string, EXECUTE IMMEDIATE would reject it)
BEGIN
    FOR x
        IN (SELECT 'alter system disconnect session ''' || sid || ',' || serial# || ''' immediate'
                       sqlstr
            FROM v$session
            WHERE username = 'P$PRODSCHEMA')
    LOOP
        EXECUTE IMMEDIATE x.sqlstr;
    END LOOP;
END;
/

-- 3. Drop the user together with all its objects
DROP USER p$prodschema CASCADE;