
            Boost on the road... tokenizer
            tokenizer - breaks a string or other character sequence into a series of tokens. Author: John Bandela.

            Example 1:
            // simple_example_1.cpp
            #include<iostream>
            #include<boost/tokenizer.hpp>
            #include<string>

            int main(){
               using namespace std;
               using namespace boost;
               string s = "This is,  a test";
               tokenizer<> tok(s);
               for(tokenizer<>::iterator beg=tok.begin(); beg!=tok.end();++beg){
                   cout << *beg << "\n";
               }
            }

            The output is:
            This
            is
            a
            test

            By default, tokenizer treats spaces and punctuation as token boundaries.
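
            A side note of my own (not in the original post): the token iterators yield std::string values, so the whole token stream can be copied into a container in one go. A minimal sketch, reusing the default tokenizer<> from Example 1:

            // collect_tokens.cpp - sketch only
            #include <boost/tokenizer.hpp>
            #include <iostream>
            #include <string>
            #include <vector>

            int main(){
               std::string s = "This is,  a test";
               boost::tokenizer<> tok(s);
               // tokenizer<>::iterator is an input iterator over std::string,
               // so a vector can be built straight from the begin/end pair.
               std::vector<std::string> tokens(tok.begin(), tok.end());
               std::cout << tokens.size() << " tokens\n";   // prints: 4 tokens
            }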

            Example 2:
            #include<iostream>
            #include<boost/tokenizer.hpp>
            #include<string>

            int main(){
               using namespace std;
               using namespace boost;
               string s = "Field 1,\"putting quotes around fields, allows commas\",Field 3";
               tokenizer<escaped_list_separator<char> > tok(s);
               for(tokenizer<escaped_list_separator<char> >::iterator beg=tok.begin(); beg!=tok.end();++beg){
                   cout << *beg << "\n";
               }
            }

            The output is:
            Field 1
            putting quotes around fields, allows commas
            Field 3

            Punctuation, including commas, may appear between double quotes without splitting the field.
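
            Beyond the defaults, escaped_list_separator can be handed its own escape, separator, and quote characters. The following is my own hedged sketch (not from the original post), assuming the escaped_list_separator(escape, separator, quote) constructor, to parse a semicolon-separated line:

            // escaped_list_custom.cpp - sketch only
            #include <boost/tokenizer.hpp>
            #include <iostream>
            #include <string>

            int main(){
               using namespace std;
               using namespace boost;
               string s = "Field 1;\"a;b;c\";Field 3";
               // '\\' escapes, ';' separates fields, '"' quotes
               escaped_list_separator<char> sep('\\', ';', '\"');
               tokenizer<escaped_list_separator<char> > tok(s, sep);
               for(tokenizer<escaped_list_separator<char> >::iterator beg=tok.begin(); beg!=tok.end();++beg){
                   cout << *beg << "\n";
               }
            }

            Expected output: Field 1, then a;b;c, then Field 3 - the quoted semicolons are not treated as separators.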


            Example 3:
            // simple_example_3.cpp
            #include<iostream>
            #include<boost/tokenizer.hpp>
            #include<string>

            int main(){
               using namespace std;
               using namespace boost;
               string s = "12252001";
               int offsets[] = {2,2,4};
               offset_separator f(offsets, offsets+3);
               tokenizer<offset_separator> tok(s,f);
               for(tokenizer<offset_separator>::iterator beg=tok.begin(); beg!=tok.end();++beg){
                   cout << *beg << "\n";
               }
            }

            This splits 12252001 into:
            12
            25
            2001
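
            One more detail worth noting (my own addition, assuming offset_separator's optional wrap_offsets and return_partial_last constructor flags, which I believe default to true): when the input is longer than the sum of the offsets, the offset list starts over, which suits a stream of fixed-width records. A sketch:

            // offset_wrap.cpp - sketch only
            #include <boost/tokenizer.hpp>
            #include <iostream>
            #include <string>

            int main(){
               using namespace std;
               using namespace boost;
               string s = "1225200101012002";   // two MMDDYYYY dates back to back
               int offsets[] = {2,2,4};
               // wrap_offsets=true restarts the {2,2,4} pattern after each pass
               offset_separator f(offsets, offsets+3, true, true);
               tokenizer<offset_separator> tok(s,f);
               for(tokenizer<offset_separator>::iterator beg=tok.begin(); beg!=tok.end();++beg){
                   cout << *beg << "\n";         // 12 25 2001 01 01 2002
               }
            }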

            Example 4:
            // char_sep_example_1.cpp
            #include <iostream>
            #include <boost/tokenizer.hpp>
            #include <string>
            #include <cstdlib>   // for EXIT_SUCCESS

            int main()
            {
              std::string str = ";!!;Hello|world||-foo--bar;yow;baz|";
              typedef boost::tokenizer<boost::char_separator<char> >
                tokenizer;
              boost::char_separator<char> sep("-;|");
              tokenizer tokens(str, sep);
              for (tokenizer::iterator tok_iter = tokens.begin();
                   tok_iter != tokens.end(); ++tok_iter)
                std::cout << "<" << *tok_iter << "> ";
              std::cout << "\n";
              return EXIT_SUCCESS;
            }

            The output is:
            <!!> <Hello> <world> <foo> <bar> <yow> <baz>
            Here the separator characters are specified explicitly: '-', ';' and '|' are all dropped.
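
            For contrast, here is a sketch of my own (not in the original post) where only the space character is dropped, so punctuation stays attached to the surrounding word:

              // char_sep_spaces.cpp - sketch only
              #include <iostream>
              #include <boost/tokenizer.hpp>
              #include <string>

              int main()
              {
                std::string str = "This is,  a test";
                boost::char_separator<char> sep(" ");   // drop spaces; no kept delimiters
                boost::tokenizer<boost::char_separator<char> > tokens(str, sep);
                for (boost::tokenizer<boost::char_separator<char> >::iterator tok_iter = tokens.begin();
                     tok_iter != tokens.end(); ++tok_iter)
                  std::cout << "<" << *tok_iter << "> ";  // <This> <is,> <a> <test>
                std::cout << "\n";
              }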

            Example 5:
                // char_sep_example_2.cpp
                #include <iostream>
                #include <boost/tokenizer.hpp>
                #include <string>
                #include <cstdlib>   // for EXIT_SUCCESS

                int main()
                {
                    std::string str = ";;Hello|world||-foo--bar;yow;baz|";
                    typedef boost::tokenizer<boost::char_separator<char> >
                        tokenizer;
                    boost::char_separator<char> sep("-;", "|", boost::keep_empty_tokens);
                    tokenizer tokens(str, sep);
                    for (tokenizer::iterator tok_iter = tokens.begin();
                         tok_iter != tokens.end(); ++tok_iter)
                      std::cout << "<" << *tok_iter << "> ";
                    std::cout << "\n";
                    return EXIT_SUCCESS;
                }

            The output is:

                <> <> <Hello> <|> <world> <|> <> <|> <> <foo> <> <bar> <yow> <baz> <|> <>
            Here '-' and ';' are dropped, while '|' is kept as its own token but still acts as a separator; with keep_empty_tokens, two adjacent separators (or a separator at the start or end of the input) produce an empty token.

            Example 6:
                // char_sep_example_3.cpp
                #include <iostream>
                #include <boost/tokenizer.hpp>
                    #include <string>
                    #include <cstdlib>   // for EXIT_SUCCESS

                int main()
                {
                   std::string str = "This is,  a test";
                   typedef boost::tokenizer<boost::char_separator<char> > Tok;
                   boost::char_separator<char> sep; // default constructed
                   Tok tok(str, sep);
                   for(Tok::iterator tok_iter = tok.begin(); tok_iter != tok.end(); ++tok_iter)
                     std::cout << "<" << *tok_iter << "> ";
                   std::cout << "\n";
                   return EXIT_SUCCESS;
                }

            The output is:

                <This> <is> <,> <a> <test>
            A default-constructed char_separator drops whitespace and keeps punctuation characters as separate tokens.

            posted on 2006-01-25 18:00 by 張沈鵬