'''
The sample demonstrates how to train a Random Trees classifier
(or Boosting classifier, or MLP, or KNearest, or Support Vector Machines) using the provided dataset.

We use the sample database letter-recognition.data
from the UCI Repository; here is the link:

Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).
UCI Repository of machine learning databases
[http://www.ics.uci.edu/~mlearn/MLRepository.html].
Irvine, CA: University of California, Department of Information and Computer Science.

The dataset consists of 20000 feature vectors along with the
responses - capital Latin letters A..Z.
The first 10000 samples are used for training
and the remaining 10000 - to test the classifier.
======================================================
USAGE:
  letter_recog.py [--model <model>]
                  [--data <data fn>]
                  [--load <model fn>] [--save <model fn>]

  Models: RTrees, KNearest, Boost, SVM, MLP
'''
+\r
import numpy as np\r
import cv2\r
\r
\r
\r
class SVM(LetterStatModel):
    # Support Vector Machine classifier, wrapping the OpenCV 2.x cv2.SVM
    # binding. Training/evaluation behaviour comes from LetterStatModel
    # (defined elsewhere in this file); this subclass only selects the
    # underlying statistical model.
    def __init__(self):
        # NOTE(review): cv2.SVM is the legacy OpenCV 2.x API; modern
        # OpenCV uses cv2.ml.SVM_create() — confirm the target OpenCV version.
        self.model = cv2.SVM()
\r
import getopt\r
import sys\r
\r
+ print __doc__\r
+\r
models = [RTrees, KNearest, Boost, SVM, MLP] # NBayes\r
models = dict( [(cls.__name__.lower(), cls) for cls in models] )\r
\r
- print 'USAGE: letter_recog.py [--model <model>] [--data <data fn>] [--load <model fn>] [--save <model fn>]'\r
- print 'Models: ', ', '.join(models)\r
- print\r
\r
args, dummy = getopt.getopt(sys.argv[1:], '', ['model=', 'data=', 'load=', 'save='])\r
args = dict(args)\r