blog.Ring.idv.tw

Hadoop

Sobel Edge Detector with Distributed Processing

.2010/05/24 Added a MapReduce New API version

About two years ago I wrote 「Sobel - Edge Detection for AS2」 in ActionScript, purely for fun. Now I have ported the same example to Hadoop, mainly to take advantage of its distributed processing. For this kind of application only the 「Map」 phase is needed: each processed image is written straight to HDFS, so there is no reason to waste bandwidth and other resources on the shuffle and reduce phases. One more point worth noting is that each map task has to process a complete image file, so the file must not be split before the 「Map」 phase. The approach taken here is to override the 「isSplitable()」 method and treat the whole file as a single record. Interested readers can find the source in the attachment:

import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import javax.imageio.ImageIO;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.sun.image.codec.jpeg.JPEGCodec;
import com.sun.image.codec.jpeg.JPEGImageEncoder;

public class SobelProcessing extends Configured implements Tool
{

    public static class Map extends MapReduceBase implements
            Mapper<NullWritable, BytesWritable, Text, Text>
    {

        private JobConf conf;

        @Override
        public void configure(JobConf conf)
        {
            this.conf = conf;
        }

        public void map(NullWritable key, BytesWritable value,
                OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException
        {
            // "map.input.file" holds the HDFS path of the image this map task is processing
            String filename = conf.get("map.input.file");
            String output_dir = conf.get("output.dir");
            filename = getFileName(filename);
            FileSystem fs = FileSystem.get(conf);
            FSDataOutputStream dos = fs.create(new Path(output_dir + filename + ".jpg"));

            BufferedImage src = ImageIO.read(new ByteArrayInputStream(value.getBytes()));

            float sobscale = Float.valueOf(conf.get("sobscale"));
            int offsetval = Integer.valueOf(conf.get("offsetval"));

            int iw = src.getWidth();
            int ih = src.getHeight();
            BufferedImage dest = new BufferedImage(iw, ih, src.getType());

            // grayscale copy of the source image (standard luma weights)
            int[][] gray = new int[iw][ih];

            for (int x = 0; x < iw; x++)
            {
                for (int y = 0; y < ih; y++)
                {
                    int rgb = src.getRGB(x, y);
                    int r = 0xFF & (rgb >> 16);
                    int g = 0xFF & (rgb >> 8);
                    int b = 0xFF & rgb;
                    gray[x][y] = (int) (0.299 * r + 0.587 * g + 0.114 * b);
                }
            }

            // 3x3 gradient over the interior pixels: |horizontal| + |vertical|
            for (int x = 1; x < iw - 1; x++)
            {
                for (int y = 1; y < ih - 1; y++)
                {
                    int a = gray[x - 1][y - 1];
                    int b = gray[x][y - 1];
                    int c = gray[x + 1][y - 1];
                    int d = gray[x - 1][y];
                    int e = gray[x + 1][y];
                    int f = gray[x - 1][y + 1];
                    int g = gray[x][y + 1];
                    int h = gray[x + 1][y + 1];

                    int hor = (a + d + f) - (c + e + h);

                    if (hor < 0)
                        hor = -hor;

                    int vert = (a + b + c) - (f + g + h);

                    if (vert < 0)
                        vert = -vert;

                    int gc = (int) (sobscale * (hor + vert));
                    gc = (gc + offsetval);

                    if (gc > 255)
                        gc = 255;

                    // write the clamped gradient magnitude back as an opaque gray pixel
                    int sobel = 0xff000000 | gc << 16 | gc << 8 | gc;
                    dest.setRGB(x, y, sobel);
                }
            }

            // encode the result as a JPEG and write it straight to HDFS
            JPEGImageEncoder encoder = JPEGCodec.createJPEGEncoder(dos);
            encoder.encode(dest);
            dos.close();
        }

        public String getFileName(String s)
        {
            return s.substring(s.lastIndexOf("/"), s.lastIndexOf("."));
        }
    }

    public int run(String[] args) throws Exception
    {
        JobConf conf = new JobConf(getConf(), SobelProcessing.class);

        conf.set("sobscale", "1.0");
        conf.set("offsetval", "0");
        conf.set("output.dir", args[1]);

        conf.setJobName("SobelProcessing");
        conf.setMapperClass(Map.class);

        conf.setInputFormat(WholeFileInputFormat.class);
        conf.setOutputFormat(NullOutputFormat.class);

        conf.set("mapred.child.java.opts", "-Xmx256m");
        conf.setNumReduceTasks(0);

        WholeFileInputFormat.setInputPaths(conf, new Path(args[0]));
        JobClient.runJob(conf);
        return 0;
    }

    public static void main(String[] args)
    {
        try
        {
            int res = ToolRunner.run(new Configuration(), new SobelProcessing(), args);
            System.exit(res);
        } catch (Exception e)
        {
            e.printStackTrace();
        }

    }
}
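The 「WholeFileInputFormat」 referenced above ships in the attachment rather than in the post body. As a rough reference only, a minimal sketch of such an input format for the old mapred API could look like the following (my own reconstruction, so the attached version may differ): 「isSplitable()」 returns false, and the record reader hands the entire file to the mapper as a single BytesWritable.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

public class WholeFileInputFormat extends FileInputFormat<NullWritable, BytesWritable>
{
    @Override
    protected boolean isSplitable(FileSystem fs, Path filename)
    {
        return false; // never split: one file = one map record
    }

    @Override
    public RecordReader<NullWritable, BytesWritable> getRecordReader(
            InputSplit split, JobConf job, Reporter reporter) throws IOException
    {
        return new WholeFileRecordReader((FileSplit) split, job);
    }

    static class WholeFileRecordReader implements RecordReader<NullWritable, BytesWritable>
    {
        private final FileSplit split;
        private final JobConf conf;
        private boolean processed = false;

        WholeFileRecordReader(FileSplit split, JobConf conf)
        {
            this.split = split;
            this.conf = conf;
        }

        public boolean next(NullWritable key, BytesWritable value) throws IOException
        {
            if (processed)
                return false;

            // read the whole file into the value in one go
            byte[] contents = new byte[(int) split.getLength()];
            Path file = split.getPath();
            FileSystem fs = file.getFileSystem(conf);
            FSDataInputStream in = null;
            try
            {
                in = fs.open(file);
                IOUtils.readFully(in, contents, 0, contents.length);
                value.set(contents, 0, contents.length);
            } finally
            {
                IOUtils.closeStream(in);
            }
            processed = true;
            return true;
        }

        public NullWritable createKey() { return NullWritable.get(); }
        public BytesWritable createValue() { return new BytesWritable(); }
        public long getPos() throws IOException { return processed ? split.getLength() : 0; }
        public float getProgress() throws IOException { return processed ? 1.0f : 0.0f; }
        public void close() throws IOException { }
    }
}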

Results:

Source code

Source code (New API)

2009-03-13 23:22:21 | Comments (58)

Average Length of URL?

Since before the Lunar New Year, a junior schoolmate and I have been busy porting things we built earlier to an online version, and I still need more time~ ><"

During the conversion a question suddenly occurred to me: roughly how long is the average URL across the whole web?

I suspect only the big search engines (Google, Yahoo, Cuil) could give an answer anywhere close to the truth.

Below is a small MapReduce program that computes such a result:

URLList

http://l.yimg.com/f/a/tw/ivychang/708971_020409_420x80_0202_yahoo-elite.swf
http://l.yimg.com/tw.yimg.com/a/tw/ivychang/712756_1231_1231new350_100.swf
http://l.yimg.com/tw.yimg.com/a/tw/erinlin/721493_0123_350x200.swf
http://www.kriesi.at/wp-content/themes/dark_rainbow/js/Particles.swf
http://tw.promo.yahoo.com/2008auction/shpticket/images/top.swf
http://l.yimg.com/tw.yimg.com/a/tw/fanny/658216_101508_420x80_4.swf
http://l.yimg.com/f/a/tw/vikii/606895_shopping_center_20090203r.swf
http://l.yimg.com/f/a/tw/hedy/697827_e3_hp_012109.swf
http://l.yimg.com/tw.yimg.com/a/tw/ivychang/708334_0120_350x200_certificate_081224.swf
http://l.yimg.com/tw.yimg.com/a/tw/ivychang/708334_0120_350x100_linux_080826.swf
http://www.ysed.org.tw/3rd_upLoad/4156/index.swf

URLAvgLength

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class URLAvgLength extends Configured implements Tool {

	static enum Counter {
		URL_COUNT
	}

	public static class Map extends MapReduceBase implements
			Mapper<LongWritable, Text, Text, IntWritable> {

		private final static Text word = new Text("Len");

		public void map(LongWritable key, Text value,
				OutputCollector<Text, IntWritable> output, Reporter reporter)
				throws IOException {

			String key2 = value.toString();
			// bump a custom counter so the driver can read back the total number of URLs
			reporter.incrCounter(Counter.URL_COUNT, 1);
			output.collect(word, new IntWritable(key2.length()));
		}
	}

	public static class Reduce extends MapReduceBase implements
			Reducer<Text, IntWritable, Text, IntWritable> {

		public void reduce(Text key, Iterator<IntWritable> values,
				OutputCollector<Text, IntWritable> output, Reporter reporter)
				throws IOException {

			int sum = 0;
			while (values.hasNext()) {
				sum += values.next().get();
			}

			output.collect(key, new IntWritable(sum));
		}
	}

	public int run(String[] args) throws Exception {
		String input = "/usr/Ring/urllist/*";
		String output = "/usr/Ring/urlavglen";
		JobConf conf = new JobConf(getConf(), URLAvgLength.class);
		FileSystem fs = FileSystem.get(conf);
		fs.delete(new Path(output), true);

		conf.setJobName("URLAvgLength");
		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(IntWritable.class);

		conf.setMapperClass(Map.class);
		conf.setCombinerClass(Reduce.class);
		conf.setReducerClass(Reduce.class);

		conf.setInputFormat(TextInputFormat.class);
		conf.setOutputFormat(TextOutputFormat.class);

		conf.setNumReduceTasks(1);

		TextInputFormat.setInputPaths(conf, new Path(input));
		TextOutputFormat.setOutputPath(conf, new Path(output));

		RunningJob running = JobClient.runJob(conf);
		Counters ct = running.getCounters();
		long count = ct.getCounter(Counter.URL_COUNT);

		// read the single reducer's output (the total length of all URLs) back from HDFS
		InputStream in = fs.open(new Path("hdfs://localhost:9000"+output+"/part-00000"));
		BufferedReader br = new BufferedReader(new InputStreamReader(in));
		String line = br.readLine();
		Integer value = Integer.parseInt(line.split("\t")[1]);
		// note: integer division, so the printed average is truncated
		System.out.println("Avg:" + value/count);
		return 0;
	}

	public static void main(String[] args) {
		try {
			int res = ToolRunner.run(new Configuration(), new URLAvgLength(),args);
			System.exit(res);
		} catch (Exception e) {
			e.printStackTrace();
		}

	}
}
Avg:67

2009-02-07 02:29:48 | Comments (1)

Pairwise Document Similarity in Large Collections with MapReduce

Pairwise Document Similarity in Large Collections with MapReduce. This is a short paper published at 「ACL-08: HLT」 by Tamer M. Elsayed, a PhD student at UMD, together with his advisor. It uses 「MapReduce」 to compute pairwise similarity over large document collections; if you are interested, please follow the paper link above, I will not go into the details here.

The verification program I wrote below depends on 「Cloud9 - A MapReduce Library for Hadoop」. 「Cloud9」 is developed at UMD, mainly as a teaching tool for courses and for some text-processing research. It is released under the Apache License, so you can check it out directly with Subversion. The example below mainly uses 「PairOfIntString」 and 「ArrayListWritable」.

Pairwise Document Similarity

In the 「Map」 phase of the 「Pairwise Similarity」 job I simply parse the intermediate output with a regular expression. This is not the best way to do it (I admit I was lazy); the proper approach would be to write a dedicated 「OutputFormat」 and 「Writable」 (such as the Tuple classes provided by Cloud9), which would improve the overall efficiency considerably.

Because this example needs two rounds of MapReduce, I use 「job2.addDependingJob(job1);」 to make the two jobs dependent: JobControl only starts job2 after job1 has completed. In other words, the second job computes, for every pair of documents, the sum over their shared terms of the product of the term counts, i.e. a dot product of term-frequency vectors.

import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapred.jobcontrol.Job;
import org.apache.hadoop.mapred.jobcontrol.JobControl;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import edu.umd.cloud9.io.ArrayListWritable;
import edu.umd.cloud9.io.PairOfIntString;

public class PairwiseDS extends Configured implements Tool
{

    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, PairOfIntString>
    {
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, PairOfIntString> output, Reporter reporter)
                throws IOException
        {
            FileSplit fileSplit = (FileSplit) reporter.getInputSplit();
            String fileName = fileSplit.getPath().getName();
            fileName = fileName.substring(0, fileName.length() - 4); // strip the 4-character extension (e.g. ".txt")

            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens())
            {
                word.set(tokenizer.nextToken());
                output.collect(word, new PairOfIntString(1, fileName));
            }
        }
    }

    public static class Reduce extends MapReduceBase implements
            Reducer<Text, PairOfIntString, Text, ArrayListWritable>
    {
        public void reduce(Text key, Iterator<PairOfIntString> values,
                OutputCollector<Text, ArrayListWritable> output,
                Reporter reporter) throws IOException
        {

            ArrayList<PairOfIntString> al = new ArrayList<PairOfIntString>();
            HashMap<String, Integer> map = new HashMap<String, Integer>();

            while (values.hasNext())
            {
                PairOfIntString psi = values.next();
                if (map.containsKey(psi.getRightElement()))
                {
                    Integer i = (Integer) map.get(psi.getRightElement());
                    map.put(psi.getRightElement(), i.intValue() + 1);
                } else
                {
                    map.put(psi.getRightElement(), psi.getLeftElement());
                }
            }
            Iterator i = map.entrySet().iterator();
            while (i.hasNext())
            {
                java.util.Map.Entry m = (java.util.Map.Entry) i.next();
                al.add(new PairOfIntString((Integer) m.getValue(), (String) m
                        .getKey()));
            }
            output.collect(key, new ArrayListWritable<PairOfIntString>(al));

        }
    }

    public static class Map2 extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, IntWritable>
    {
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException
        {
            String line = value.toString().trim();

            ArrayList<String> keyList = new ArrayList<String>();
            ArrayList<Integer> valList = new ArrayList<Integer>();

            // match the "(count, filename)" pairs written by the indexing job's text output
            String p = "\\(([0-9]+), ([a-z0-9.]+)\\)";
            Pattern r = Pattern.compile(p);
            Matcher m = r.matcher(line);
            while (m.find())
            {
                String k = m.group(2);
                String v = m.group(1);
                keyList.add(k);
                valList.add(new Integer(v));
            }

            if (keyList.size() > 1)
            {
                String[] key_arr = keyList.toArray(new String[0]);
                Integer[] val_arr = valList.toArray(new Integer[0]);
                int klen = key_arr.length;
                for (int i = 0; i < klen; i++)
                {
                    for (int j = i + 1; j < klen; j++)
                    {
                        word.set(key_arr[i] + "," + key_arr[j]);
                        output.collect(word, new IntWritable(val_arr[i]
                                * val_arr[j]));
                    }
                }
            }

        }
    }

    public static class Reduce2 extends MapReduceBase implements
            Reducer<Text, IntWritable, Text, IntWritable>
    {
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException
        {
            int sum = 0;
            while (values.hasNext())
            {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public int run(String[] args) throws Exception
    {
        // ===================== Indexing =====================
        JobConf conf = new JobConf(getConf(), PairwiseDS.class);
        conf.setJobName("Indexing");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(PairOfIntString.class);

        conf.setMapperClass(Map.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        conf.setNumReduceTasks(1);
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        TextOutputFormat.setOutputPath(conf, new Path(args[1]));

        Job job1 = new Job(conf);
        // ===================== Pairwise Similarity =====================
        JobConf conf2 = new JobConf(getConf(), PairwiseDS.class);
        conf2.setJobName("Pairwise Similarity");
        conf2.setOutputKeyClass(Text.class);
        conf2.setOutputValueClass(IntWritable.class);

        conf2.setMapperClass(Map2.class);
        conf2.setReducerClass(Reduce2.class);

        conf2.setInputFormat(TextInputFormat.class);
        conf2.setOutputFormat(TextOutputFormat.class);
        conf2.setNumReduceTasks(1);

        FileInputFormat.setInputPaths(conf2, new Path(args[1] + "/p*"));
        TextOutputFormat.setOutputPath(conf2, new Path(args[2]));
        Job job2 = new Job(conf2);

        job2.addDependingJob(job1);
        JobControl controller = new JobControl("Pairwise Document Similarity");
        controller.addJob(job1);
        controller.addJob(job2);
        new Thread(controller).start();

        while (!controller.allFinished())
        {
            System.out.println("Jobs in waiting state: "+ controller.getWaitingJobs().size());
            System.out.println("Jobs in ready state: "+ controller.getReadyJobs().size());
            System.out.println("Jobs in running state: "+ controller.getRunningJobs().size());
            System.out.println("Jobs in success state: "+ controller.getSuccessfulJobs().size());
            System.out.println("Jobs in failed state: "+ controller.getFailedJobs().size());
            System.out.println();

            try
            {
                Thread.sleep(20000);
            } catch (Exception e)
            {
                e.printStackTrace();
            }
        }
        return 0;

    }

    public static void main(String[] args) throws Exception
    {
        int res = ToolRunner.run(new Configuration(), new PairwiseDS(), args);
        System.exit(res);
    }
}

after Indexing:

after Pairwise Similarity:

Related resources

Common Hadoop SDK series, part 4: JobControl

2009-01-05 22:30:04 | Comments (4)

Hadoop - The Uncompressed SequenceFile Format in Detail

This article is also published at: Hadoop Taiwan User Group

I have just been digging into an 「Uncompressed SequenceFile Format」 file, checking it field by field against the Hadoop source code. My notes are summarized below:

As described in 「Class SequenceFile」, a 「SequenceFile」 basically comes in three formats: the 「Uncompressed SequenceFile Format」, the 「Record-Compressed SequenceFile Format」 and the 「Block-Compressed SequenceFile Format」. The latter two are compressed; this article dissects the 「Uncompressed SequenceFile Format」, and once you understand this one the other two follow naturally. The official description of the format is given in the 「Class SequenceFile」 documentation (see the references below).

Each of these formats shares a common 「SequenceFile Header」 that records basic information such as the keyClassName, the valueClassName, and so on.

This article walks through the example shown in the figure below (a hex dump of an uncompressed SequenceFile):

In the figure, the fields are marked in 「red → blue → green」 order to make them easier to match against the explanation.

0x53 0x45 0x51

This is the magic header 「SEQ」 of the SequenceFile format. As with most file formats, it is used to decide whether a file is a 「SequenceFile」 at all.

0x06

The version number; the newest version is currently 「SEQ6」.

0x19 0x6F 0x72 ..... 0x74

This part is the keyClassName (the class name of the key). The first byte (0x19) gives the length of the string; in this example the key class is 「org.apache.hadoop.io.Text」.

0x22 0x6F 0x72 ..... 0x65

This part is the valueClassName (the class name of the value). Again the first byte (0x22) gives the length of the string; in this example the value class is 「org.apache.hadoop.io.BytesWritable」.

0x00

Is compression enabled? 「0x00」 = no (a boolean, so it occupies 1 byte).

0x00

Is block compression enabled? 「0x00」 = no (a boolean, so it occupies 1 byte).

0x00 0x00 0x00 0x00

The metadata. This example does not contain any 「SequenceFile.Metadata」 entries, so the output is 「0x00 0x00 0x00 0x00」 (an int, so it occupies 4 bytes). These four bytes hold the number of metadata entries, which means the metadata always takes up at least these 4 bytes.

0x77 0xE5 0xEF ..... 0xA7

A sync marker, which marks the end of the 「Header」. It is generated randomly; from the source code you can see that it is the 「MD5」 digest of 「new UID()+"@"+time」.

0x00 0x35 0x62 0x8B

The size of the entire record (an int, 4 bytes). A record holds the content of one 「Key, Value」 pair.

0x00 0x00 0x00 0x2C

The size of the key content (an int, 4 bytes).

0x2B 0x68 0x64 ..... 0x47

Since I use 「org.apache.hadoop.io.Text」 as the key, the content here is a file path. The first byte (0x2B) gives the length of the string, and the content is 「hdfs://nlp:9000/user/hdp/image/P1010099.JPG」.

0x00 0x35 0x62 0x5B

The size of the value content (an int, 4 bytes).

0xFF 0xD8 0xFF .....

The value in this example is a JPEG file, which is why the data here starts with 「0xFF 0xD8」.
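To tie the fields above together, here is a small illustrative sketch that walks the same header by hand with a plain DataInputStream (it is not taken from Hadoop; for real work use SequenceFile.Reader). It assumes an uncompressed, version-6 file, in which case no compression codec class name appears and the metadata section is just the 4-byte entry count; the file path is passed as a command-line argument.

import java.io.DataInputStream;
import java.io.FileInputStream;
import org.apache.hadoop.io.Text;

public class SeqHeaderDump
{
    public static void main(String[] args) throws Exception
    {
        DataInputStream in = new DataInputStream(new FileInputStream(args[0]));

        byte[] magic = new byte[3];
        in.readFully(magic);                          // 0x53 0x45 0x51 -> "SEQ"
        byte version = in.readByte();                 // 0x06

        String keyClassName = Text.readString(in);    // length-prefixed, e.g. org.apache.hadoop.io.Text
        String valueClassName = Text.readString(in);  // e.g. org.apache.hadoop.io.BytesWritable

        boolean compressed = in.readBoolean();        // 0x00 = false
        boolean blockCompressed = in.readBoolean();   // 0x00 = false

        int metadataEntries = in.readInt();           // 0x00 0x00 0x00 0x00 = no metadata entries

        byte[] sync = new byte[16];                   // 16-byte sync marker (MD5 of new UID()+"@"+time)
        in.readFully(sync);

        int recordLength = in.readInt();              // size of the first record (key + value)
        int keyLength = in.readInt();                 // size of the first key

        System.out.println(new String(magic) + version
                + " key=" + keyClassName
                + " value=" + valueClassName
                + " compressed=" + compressed
                + " blockCompressed=" + blockCompressed
                + " metadataEntries=" + metadataEntries
                + " firstRecordLength=" + recordLength
                + " firstKeyLength=" + keyLength);
        in.close();
    }
}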

Finally, even though the official Hadoop site does not publish a detailed description of this format (or perhaps I just could not find one?), I hope this article proves helpful.

References

SequenceFile - Hadoop Wiki

Class SequenceFile

2008-12-25 20:06:27 | Add Comment

Jetty - Java HTTP Servlet Server

.2008-12-15 Added an Apache Commons DBCP example

When I first started learning about web applications six or seven years ago, the names I kept hearing were Tomcat and Resin. As for Jetty... well, I had never even heard of it... XD

Jetty is an open-source HTTP server & servlet container written 100% in Java. Judging from the three-year-old article 「Jetty vs. Tomcat vs. Resin: A Performance Comparison」, its performance looks quite respectable. OK, back to the point: why did I become interested in it? Two reasons. First, the servlet container bundled with Hadoop up to version 0.19.0 is Jetty 5.1.4, and according to the issue 「(#HADOOP-1650) Upgrade Jetty to 6.x - ASF JIRA」, version 0.20 will switch to Jetty 6.x.

Second, at the beginning of this year I helped my professor rewrite an 「English Collocations」 prototype system as a web application. A system like this only needs 「read-only」 access to its database, so I would like to package the whole thing as a 「DVD-ROM」 version, turning it into a web application you can carry around. That would make it far more convenient, and Jetty is a very good fit for this kind of use!

Below is a quick note of some commonly used settings and steps:

Starting the Jetty Server

java -jar start.jar etc/jetty.xml
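For the portable 「DVD-ROM」 scenario mentioned above, Jetty can also be started from a tiny Java main instead of start.jar. The class below is only a sketch assuming the Jetty 6 (org.mortbay) API; the port, context path and webapp directory are example values, not settings taken from this post:

import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;

public class PortableJetty
{
	public static void main(String[] args) throws Exception
	{
		Server server = new Server(8080);            // listen on port 8080 (example)

		WebAppContext webapp = new WebAppContext();
		webapp.setContextPath("/webapp");            // context path, as in the jetty.xml example below
		webapp.setWar("webapps/webapp");             // path to the (exploded) web application

		server.setHandler(webapp);
		server.start();                              // start Jetty
		server.join();                               // block until the server is stopped
	}
}

With the JRE, the Jetty jars, the webapp and the read-only database all placed on the disc, the whole application can then be launched with a single java command.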

Manually adding a new Web Application (with a VirtualHost setting)

Edit 「/etc/jetty.xml」.

<New class="org.mortbay.jetty.webapp.WebAppContext">
	<Arg><Ref id="Contexts"/></Arg>
	<Arg><SystemProperty name="jetty.home"/>/webapps/webapp</Arg>
	<Arg>/webapp</Arg>
	<Set name="defaultsDescriptor"><SystemProperty name="jetty.home" default="."/>/etc/webdefault.xml</Set>
	<Set name="VirtualHosts">
	<Array type="java.lang.String">
		<Item>localhost</Item>
	</Array>
	</Set>
</New>

Testing a Servlet - Hello World

import java.io.*;
import javax.servlet.*;
import javax.servlet.http.*;

public class Test extends HttpServlet
{
	public void doGet(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException
	{
		res.setContentType("text/html");
		PrintWriter out = res.getWriter();
		out.println("<html>");
		out.println("<head><title>Hello</title></head>");
		out.println("<body>");
		out.println("HIHI");
		out.println("</body>");
		out.println("</html>");
	}
}

Edit 「web.xml」

<servlet>
	<servlet-name>Hello</servlet-name>
	<servlet-class>Test</servlet-class>
</servlet>
<servlet-mapping>
	<servlet-name>Hello</servlet-name>
	<url-pattern>/Hello.do</url-pattern>
</servlet-mapping>

Finally, just open 「http://localhost:8080/webapp/Hello.do」.

PostgreSQL - ConnectionPoolDataSource

Edit 「/etc/jetty.xml」.

<New id="DSTest" class="org.mortbay.jetty.plus.naming.Resource">
	<Arg>jdbc/DSTest</Arg>
	<Arg>
	<New class="org.postgresql.ds.PGConnectionPoolDataSource">
		<Set name="User">postgres</Set>
		<Set name="Password">xxx</Set>
		<Set name="DatabaseName">test</Set>
		<Set name="ServerName">localhost</Set>
		<Set name="PortNumber">5432</Set>
	</New>
	</Arg>
</New>

Edit 「web.xml」.

<resource-ref>
	<description>My DataSource Reference</description>
	<res-ref-name>jdbc/DSTest</res-ref-name>
	<res-type>javax.sql.DataSource</res-type>
	<res-auth>Container</res-auth>
</resource-ref>

A simple test program looks like this:

import java.io.*;
import java.sql.*;
import javax.sql.*;
import javax.naming.*;
import javax.servlet.*;
import javax.servlet.http.*;

public class PostgresDBTest extends HttpServlet
{
	public void doGet (HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException
	{
		res.setContentType("text/html");
		PrintWriter out = res.getWriter();
		
		Connection con = null;
		Statement stmt = null;
		ResultSet rs = null;
		
		try
		{
			ConnectionPoolDataSource source = (ConnectionPoolDataSource)new InitialContext().lookup("jdbc/DSTest");
			con = source.getPooledConnection().getConnection();
			stmt = con.createStatement();
			rs = stmt.executeQuery("select * from test");
			while(rs.next())
			{
				out.println("Title:"+rs.getString(1)+"<br/>");
			}
		} catch(Exception e){
			e.printStackTrace();
		} finally {
			if(con != null)
			{
				try {
					con.close();
				}catch(SQLException e)
				{
					e.printStackTrace();
				}
			}
		}
	}	
}

Edit 「web.xml」.

<servlet>
	<servlet-name>PostgresDBTest</servlet-name>
	<servlet-class>PostgresDBTest</servlet-class>
</servlet>
<servlet-mapping>
	<servlet-name>PostgresDBTest</servlet-name>
	<url-pattern>/PostgresDBTest.do</url-pattern>
</servlet-mapping>

Finally, just open 「http://localhost:8080/webapp/PostgresDBTest.do」.

Apache Commons DBCP - PostgreSQL

Download the following three libraries and copy them into 「Jetty_Home/lib」.

commons-dbcp.jar

commons-pool.jar

commons-collections.jar

Edit 「/etc/jetty.xml」.

<New id="pgsqldbcp" class="org.mortbay.jetty.plus.naming.Resource">
	<Arg>jdbc/Blog</Arg>
	<Arg>
	<New class="org.apache.commons.dbcp.BasicDataSource">
	<Set name="driverClassName">org.postgresql.Driver</Set>
	<Set name="url">jdbc:postgresql://localhost/Blog</Set>
	<Set name="username">postgres</Set>
	<Set name="password">1234</Set>
	<Set name="maxActive">10</Set>
	</New>
	</Arg>
</New>

Edit 「web.xml」.

<resource-ref>
	<description>My DataSource Reference</description>
	<res-ref-name>jdbc/Blog</res-ref-name>
	<res-type>javax.sql.DataSource</res-type>
	<res-auth>Container</res-auth>
</resource-ref>

The test program is as follows:

import java.io.*;
import java.sql.*;
import javax.sql.*;
import javax.naming.*;
import javax.servlet.*;
import javax.servlet.http.*;

public class PostgresDBTest extends HttpServlet
{
	public void doGet (HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException
	{
		res.setContentType("text/html");
		PrintWriter out = res.getWriter();
		
		Connection con = null;
		Statement stmt = null;
		ResultSet rs = null;
		
		try
		{
			InitialContext ic = new InitialContext();
			DataSource source = (DataSource)ic.lookup("jdbc/Blog");
			con = source.getConnection();  
			stmt = con.createStatement();
			rs = stmt.executeQuery("select * from test");
			while(rs.next())
			{
    				out.println("Title:"+rs.getString(1)+"<br/>");
			}
		} catch(Exception e){
			e.printStackTrace();
		} finally {
			if(con != null)
			{
				try {
					con.close();
				}catch(SQLException e)
				{
					e.printStackTrace();
				}
			}
		}
	}		
}

2008-12-07 21:36:29 | Comments (5)

Copyright (C) Ching-Shen Chen. All rights reserved.
