pull/56/merge
zhao yi 2017-05-26 07:59:39 +00:00 committed by GitHub
commit 50f3883d27
18 changed files with 3841 additions and 0 deletions

5
.gitignore vendored Normal file
View File

@ -0,0 +1,5 @@
*~
.DS_Store
.idea
tags
_build

192
doc/en/Makefile Normal file
View File

@ -0,0 +1,192 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

# NOTE(review): added applehelp, latexpdfja, texinfo, info, xml and pseudoxml,
# which are real targets below but were missing from the original .PHONY list.
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp applehelp \
	devhelp epub latex latexpdf latexpdfja text man texinfo info gettext \
	changes xml pseudoxml linkcheck doctest coverage

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  applehelp  to make an Apple Help Book"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
	@echo "  coverage   to run coverage check of the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FastDFS.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FastDFS.qhc"

applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	      "~/Library/Documentation/Help or install it in your application" \
	      "bundle."

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/FastDFS"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FastDFS"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	# NOTE(review): was a literal `make`; $(MAKE) propagates -j/-n and flags.
	$(MAKE) -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	      "results in $(BUILDDIR)/coverage/python.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

358
doc/en/conf.py Normal file
View File

@ -0,0 +1,358 @@
# -*- coding: utf-8 -*-
#
# FastDFS documentation build configuration file, created by
# sphinx-quickstart on Wed May 4 10:59:19 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE(review): sys, os and shlex appear unused below; they are part of the
# sphinx-quickstart boilerplate and kept for compatibility with generated code.
import sys
import os
import shlex
# Third-party Read the Docs theme; must be installed for the build to run.
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FastDFS'
copyright = u'2016, Copyright (C) 2008 Happy Fish / YuQing'
author = u'Copyright (C) 2008 Happy Fish / YuQing'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.0.8'
# The full version, including alpha/beta/rc tags.
release = '5.0.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'FastDFSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FastDFS.tex', u'FastDFS Documentation',
u'Copyright (C) 2008 Happy Fish / YuQing', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fastdfs', u'FastDFS Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FastDFS', u'FastDFS Documentation',
author, 'FastDFS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True

318
doc/en/fastdfs/install.rst Normal file
View File

@ -0,0 +1,318 @@
Installation
======================
Copyright 2009 Happy Fish / YuQing
FastDFS may be copied only under the terms of the GNU General
Public License V3, which may be found in the FastDFS source kit.
Please visit the FastDFS Home Page for more detail.
* English language: http://english.csource.org/
* Chinese language: http://www.csource.org/
Installing FastDFS from Source
--------------------------------
step 1. download libfastcommon source package from github and install it
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
the github address:
https://github.com/happyfish100/libfastcommon.git
step 2. download FastDFS source package and unpack it,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
tar xzf FastDFS_v5.x.tar.gz
for example:
::
tar xzf FastDFS_v5.08.tar.gz
step 3. enter the FastDFS dir
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
cd FastDFS
step 4. execute
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
./make.sh
step 5. make install
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
./make.sh install
step 6. edit/modify the config file of tracker and storage
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
step 7. run server programs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
start the *tracker* server:
::
/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf restart
in Linux, you can start fdfs_trackerd as a service:
::
/sbin/service fdfs_trackerd start
start the *storage* server:
::
/usr/bin/fdfs_storaged /etc/fdfs/storage.conf restart
in Linux, you can start fdfs_storaged as a service:
::
/sbin/service fdfs_storaged start
step 8. run test program
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
run the client test program:
::
/usr/bin/fdfs_test <client_conf_filename> <operation>
/usr/bin/fdfs_test1 <client_conf_filename> <operation>
for example, upload a file:
::
/usr/bin/fdfs_test conf/client.conf upload /usr/include/stdlib.h
step 9. run monitor program
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
run the monitor program:
::
/usr/bin/fdfs_monitor <client_conf_filename>
tracker server config file sample please see conf/tracker.conf
storage server config file sample please see conf/storage.conf
client config file sample please see conf/client.conf
Item detail
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
1. server common items
+-----------------------+--------+---------+------+
| item name | type | default | Must |
+=======================+========+=========+======+
| base_path | string | | Y |
+-----------------------+--------+---------+------+
| disabled | boolean| false | N |
+-----------------------+--------+---------+------+
| bind_addr | string | | N |
+-----------------------+--------+---------+------+
| network_timeout | int | 30(s) | N |
+-----------------------+--------+---------+------+
| max_connections | int | 256 | N |
+-----------------------+--------+---------+------+
| log_level | string | info | N |
+-----------------------+--------+---------+------+
| run_by_group | string | | N |
+-----------------------+--------+---------+------+
| run_by_user | string | | N |
+-----------------------+--------+---------+------+
| allow_hosts | string | \* | N |
+-----------------------+--------+---------+------+
| sync_log_buff_interval| int | 10(s) | N |
+-----------------------+--------+---------+------+
| thread_stack_size | string | 1M | N |
+-----------------------+--------+---------+------+
memo::
* base_path is the base path of sub dirs:
data and logs. base_path must exist and it's sub dirs will
be automatically created if not exist.
$base_path/data: store data files
$base_path/logs: store log files
* log_level is the standard log level as syslog, case insensitive
# emerg: for emergency
# alert
# crit: for critical
# error
# warn: for warning
# notice
# info
# debug
* allow_hosts can occur more than once, host can be hostname or ip address,
"*" means match all ip addresses, can use range like this: 10.0.1.[1-15,20]
or host[01-08,20-25].domain.com, for example:
allow_hosts=10.0.1.[1-15,20]
allow_hosts=host[01-08,20-25].domain.com
2. tracker server items
+-----------------------+--------+---------+------+
| item name | type | default | Must |
+=======================+========+=========+======+
| port | int | 22000 | N |
+-----------------------+--------+---------+------+
| store_lookup | int | 0 | N |
+-----------------------+--------+---------+------+
| store_group | string | | N |
+-----------------------+--------+---------+------+
| store_server | int | 0 | N |
+-----------------------+--------+---------+------+
| store_path | int | 0 | N |
+-----------------------+--------+---------+------+
| download_server | int | 0 | N |
+-----------------------+--------+---------+------+
| reserved_storage_space| string | 1GB | N |
+-----------------------+--------+---------+------+
memo::
* the value of store_lookup is:
0: round robin (default)
1: specify group
2: load balance (supported since V1.1)
* store_group is the name of group to store files.
when store_lookup set to 1(specify group),
store_group must be set to a specified group name.
* reserved_storage_space is the reserved storage space for system
or other applications. if the free(available) space of any storage
server in a group <= reserved_storage_space, no file can be uploaded
to this group (since V1.1)
bytes unit can be one of follows:
# G or g for gigabyte(GB)
# M or m for megabyte(MB)
# K or k for kilobyte(KB)
# no unit for byte(B)
3. storage server items
+--------------------------+--------+---------+------+
| item name | type | default | Must |
+==========================+========+=========+======+
| group_name | string | | Y |
+--------------------------+--------+---------+------+
| tracker_server | string | | Y |
+--------------------------+--------+---------+------+
| port | int | 23000 | N |
+--------------------------+--------+---------+------+
| heart_beat_interval | int | 30(s) | N |
+--------------------------+--------+---------+------+
| stat_report_interval | int | 300(s) | N |
+--------------------------+--------+---------+------+
| sync_wait_msec | int | 100(ms) | N |
+--------------------------+--------+---------+------+
| sync_interval | int | 0(ms) | N |
+--------------------------+--------+---------+------+
| sync_start_time | string | 00:00 | N |
+--------------------------+--------+---------+------+
| sync_end_time | string | 23:59 | N |
+--------------------------+--------+---------+------+
| store_path_count | int | 1 | N |
+--------------------------+--------+---------+------+
| store_path0 | string |base_path| N |
+--------------------------+--------+---------+------+
| store_path# | string | | N |
+--------------------------+--------+---------+------+
|subdir_count_per_path | int | 256 | N |
+--------------------------+--------+---------+------+
|check_file_duplicate | boolean| 0 | N |
+--------------------------+--------+---------+------+
| key_namespace | string | | N |
+--------------------------+--------+---------+------+
| keep_alive | boolean| 0 | N |
+--------------------------+--------+---------+------+
| sync_binlog_buff_interval| int | 60s | N |
+--------------------------+--------+---------+------+
memo::
* tracker_server can occur more than once, and tracker_server format is
"host:port", host can be hostname or ip address.
* store_path#, # for digital, based 0
* check_file_duplicate: when set to true, must work with FastDHT server,
more detail please see INSTALL of FastDHT. FastDHT download page:
http://code.google.com/p/fastdht/downloads/list
* key_namespace: FastDHT key namespace, can't be empty when
check_file_duplicate is true. the key namespace should be as short as possible
How to create RPM packages
---------------------------
.. code-block:: shell
yum -y install gcc rpm-build git re2c php php-cli php-devel
.. code-block:: bash
#!/usr/bin/env bash
set -eu
mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
if [ ! -d /opt/src/fastdfs/build ]; then
mkdir -p /opt/src/fastdfs/build
cd /opt/src/fastdfs
git clone https://github.com/happyfish100/fastdfs.git
git clone https://github.com/happyfish100/libfastcommon.git
fi
cd /opt/src/fastdfs
# fastdfs build require libfastcommon
rsync -az --exclude=.git --delete /opt/src/fastdfs/libfastcommon/ /opt/src/fastdfs/build/libfastcommon-1.0.27/
# fastdfs tracker and storage
rsync -az --exclude=.git --delete /opt/src/fastdfs/fastdfs/ /opt/src/fastdfs/build/fastdfs-5.0.8/
# fastdfs_client for php extension
rsync -az --exclude=.git --delete /opt/src/fastdfs/fastdfs/php_client/ /opt/src/fastdfs/build/fastdfs_client-5.0.8/
cd /opt/src/fastdfs/build
tar zcf fastdfs-5.0.8.tar.gz fastdfs-5.0.8/
tar zcf fastdfs_client-5.0.8.tar.gz fastdfs_client-5.0.8/
tar zcf libfastcommon-1.0.27.tar.gz libfastcommon-1.0.27/
cp -f *.tar.gz ~/rpmbuild/SOURCES/
cp -f fastdfs-5.0.8/fastdfs.spec ~/rpmbuild/SPECS/fastdfs.spec
cp -f fastdfs_client-5.0.8/fastdfs_client.spec.in ~/rpmbuild/SPECS/fastdfs_client.spec
cp -f libfastcommon-1.0.27/libfastcommon.spec ~/rpmbuild/SPECS/libfastcommon.spec
cd ~
rpmbuild -bb ~/rpmbuild/SPECS/libfastcommon.spec
rpm -ivh --replacepkgs --replacefiles ~/rpmbuild/RPMS/x86_64/libfastcommon*
rpmbuild -bb ~/rpmbuild/SPECS/fastdfs.spec
rpm -ivh --replacepkgs --replacefiles ~/rpmbuild/RPMS/x86_64/libfdfsclient*
rpmbuild -bb ~/rpmbuild/SPECS/fastdfs_client.spec
ls ~/rpmbuild/RPMS/x86_64

View File

@ -0,0 +1,48 @@
Introduction
=======================
Copyright (C) 2008 Happy Fish / YuQing
FastDFS may be copied only under the terms of the GNU General
Public License V3, which may be found in the FastDFS source kit.
Please visit the FastDFS Home Page for more detail.
* English language: http://english.csource.org/
* Chinese language: http://www.csource.org/
FastDFS is an open source high performance distributed file system. Its major
functions include: file storing, file syncing and file accessing (file uploading
and file downloading), and it can resolve the high capacity and load balancing
problem. FastDFS should meet the requirement of the website whose service based
on files such as photo sharing site and video sharing site.
FastDFS has two roles: tracker and storage. The tracker takes charge of
scheduling and load balancing for file access. The storage stores files and its
function is file management including: file storing, file syncing, and providing
the file access interface. It also manages the metadata, which are attributes of
the file represented as key-value pairs. For example: width=1024, where the key
is "width" and the value is "1024".
The tracker and storage contain one or more servers. The servers in the tracker
or storage cluster can be added to or removed from the cluster at any time without
affecting the online services. The servers in the tracker cluster are peer to peer.
The storage servers are organized by file volume/group to obtain high capacity.
The storage system contains one or more volumes whose files are independent among
these volumes. The capacity of the whole storage system equals to the sum of all
volumes' capacity. A file volume contains one or more storage servers whose files
are same among these servers. The servers in a file volume backup each other,
and all these servers are load balancing. When adding a storage server to a
volume, files already existing in this volume are replicated to this new server
automatically, and when this replication done, system will switch this server
online to providing storage services.
When the whole storage capacity is insufficient, you can add one or more
volumes to expand the storage capacity. To do this, you need to add one or
more storage servers.
The identification of a file is composed of two parts: the volume name and
the file name.
Client test code use client library please refer to the directory: client/test.

459
doc/en/fastdfs/protocol.rst Normal file
View File

@ -0,0 +1,459 @@
Protocol
=======================
The communication protocol of FastDFS is TCP/IP, the package composes of header and body which may be empty.
header format
-----------------------
.. code-block:: ini
@ TRACKER_PROTO_PKG_LEN_SIZE bytes package length
@ 1 byte command
@ 1 byte status
note::
# TRACKER_PROTO_PKG_LEN_SIZE (8) bytes number buff is Big-Endian bytes
body format
-----------------------
1. common command
^^^^^^^^^^^^^^^^^^^^^^^
* FDFS_PROTO_CMD_QUIT
::
# function: notify server connection will be closed
# request body: none (no body part)
# response: none (no header and no body)
* FDFS_PROTO_CMD_ACTIVE_TEST
::
# function: active test
# request body: none
# response body: none
2. storage server to tracker server command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* the response command is TRACKER_PROTO_CMD_STORAGE_RESP
* TRACKER_PROTO_CMD_STORAGE_JOIN
::
# function: storage join to tracker
# request body:
@ FDFS_GROUP_NAME_MAX_LEN + 1 bytes: group name
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage http server port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: path count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: subdir count per path
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: upload priority
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: join time (join timestamp)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: up time (start timestamp)
@ FDFS_VERSION_SIZE bytes: storage server version
@ FDFS_DOMAIN_NAME_MAX_SIZE bytes: domain name of the web server on the storage server
@ 1 byte: init flag ( 1 for init done)
@ 1 byte: storage server status
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: tracker server count excluding current tracker
# response body:
@ FDFS_IPADDR_SIZE bytes: sync source storage server ip address
# memo: return all storage servers in the group only when storage servers changed or return none
* TRACKER_PROTO_CMD_STORAGE_BEAT
::
# function: heart beat
# request body: none or storage stat info
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total upload count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success upload count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total set metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success set metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total delete count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success delete count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total download count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success download count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total get metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success get metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total create link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success create link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total delete link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success delete link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last source update timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last sync update timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last synced timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last heart beat timestamp
# response body: n * (1 + FDFS_IPADDR_SIZE) bytes, n >= 0. One storage entry format:
@ 1 byte: storage server status
@ FDFS_IPADDR_SIZE bytes: storage server ip address
# memo: the storage server syncs its stat info to the tracker server only when its stat info has changed
* TRACKER_PROTO_CMD_STORAGE_REPORT
::
# function: report disk usage
# request body 1 or more than 1 following entries:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total space in MB
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: free space in MB
# response body: same to command TRACKER_PROTO_CMD_STORAGE_BEAT
* TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG
::
# function: replicate new storage servers which may not exist in the tracker server
# request body: n * (1 + FDFS_IPADDR_SIZE) bytes, n >= 1. One storage entry format:
@ 1 byte: storage server status
@ FDFS_IPADDR_SIZE bytes: storage server ip address
# response body: none
* TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ
::
# function: source storage server sync request. When a new storage server is added, the existing storage servers in the same group will ask the tracker server to tell them the source storage server which will sync old data to it
# request body:
@ FDFS_GROUP_NAME_MAX_LEN: group name
@ FDFS_IPADDR_SIZE bytes: dest storage server (new storage server) ip address
# response body: none or
@ FDFS_IPADDR_SIZE bytes: source storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: sync until timestamp
# memo: if the dest storage server does not need to sync from one of the storage servers in the group, the response body is empty
* TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ
::
# function: dest storage server (new storage server) require sync
# request body: none
# response body: none or
@ FDFS_IPADDR_SIZE bytes: source storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: sync until timestamp
# memo: if the dest storage server does not need to sync from one of the storage servers in the group, the response body is empty
* TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY
::
# function: new storage server sync notify
# request body:
@ FDFS_IPADDR_SIZE bytes: source storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: sync until timestamp
# response body: same to command TRACKER_PROTO_CMD_STORAGE_BEAT
3. client to tracker server command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* the response command of the following 2 commands is TRACKER_PROTO_CMD_SERVER_RESP
* TRACKER_PROTO_CMD_SERVER_LIST_GROUP
::
# function: list all groups
# request body: none
# response body: n group entries, n >= 0, the format of each entry:
@ FDFS_GROUP_NAME_MAX_LEN+1 bytes: group name
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: free disk storage in MB
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server http port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: active server count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: current write server index
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: store path count on storage server
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: subdir count per path on storage server
* TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
::
# function: list storage servers of a group
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: the group name to query
# response body: n storage entries, n >= 0, the format of each entry:
@ 1 byte: status
@ FDFS_IPADDR_SIZE bytes: ip address
@ FDFS_DOMAIN_NAME_MAX_SIZE bytes : domain name of the web server
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: source storage server ip address
@ FDFS_VERSION_SIZE bytes: storage server version
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: join time (join in timestamp)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: up time (start timestamp)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total space in MB
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: free space in MB
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: upload priority
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: store path count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: subdir count per path
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: current write path[
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage http port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total upload count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success upload count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total set metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success set metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total delete count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success delete count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total download count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success download count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total get metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success get metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total create link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success create link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total delete link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success delete link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last source update timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last sync update timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last synced timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last heart beat timestamp
* the response command of the following 2 commands is TRACKER_PROTO_CMD_SERVICE_RESP
* TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
::
# function: query which storage server to store file
# request body: none
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@1 byte: store path index on the storage server
* TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL
::
# function: query which storage server to store file
# request body: none
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address (* multi)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port (*multi)
@1 byte: store path index on the storage server
* TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
::
# function: query which storage server to store file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@1 byte: store path index on the storage server
* TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL
::
# function: query which storage server to store file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address (* multi)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port (* multi)
@1 byte: store path index on the storage server
* TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH
::
# function: query which storage server to download the file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
* TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL
::
# function: query all storage servers to download the file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@ n * (FDFS_IPADDR_SIZE - 1) bytes: storage server ip addresses, n can be 0
* TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE
::
# function: query which storage server to update the file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
4. storage server to storage server command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* the response command is STORAGE_PROTO_CMD_RESP
* STORAGE_PROTO_CMD_SYNC_CREATE_FILE
::
# function: sync new created file
# request body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: filename bytes
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file size/bytes
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes : filename
@ file size bytes: file content
# response body: none
* STORAGE_PROTO_CMD_SYNC_DELETE_FILE
::
# function: sync deleted file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body: none
* STORAGE_PROTO_CMD_SYNC_UPDATE_FILE
::
# function: sync updated file
# request body: same to command STORAGE_PROTO_CMD_SYNC_CREATE_FILE
# response body: none
5. client to storage server command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* the response command is STORAGE_PROTO_CMD_RESP
* STORAGE_PROTO_CMD_UPLOAD_FILE
::
# function: upload file to storage server
# request body:
@ 1 byte: store path index on the storage server
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file size
@ FDFS_FILE_EXT_NAME_MAX_LEN bytes: file ext name, do not include dot (.)
@ file size bytes: file content
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
* STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE
# function: upload slave file to storage server
# request body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: master filename length
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file size
@ FDFS_FILE_PREFIX_MAX_LEN bytes: filename prefix
@ FDFS_FILE_EXT_NAME_MAX_LEN bytes: file ext name, do not include dot (.)
@ master filename bytes: master filename
@ file size bytes: file content
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
* STORAGE_PROTO_CMD_DELETE_FILE
::
# function: delete file from storage server
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body: none
* STORAGE_PROTO_CMD_SET_METADATA
::
# function: set meta data of the file on the storage server
# request body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: filename length
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: meta data size
@ 1 bytes: operation flag,
'O' for overwrite all old metadata
'M' for merge, insert when the meta item not exist, otherwise update it
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
@ meta data bytes: each meta data separated by \x01,
name and value separated by \x02
# response body: none
* STORAGE_PROTO_CMD_DOWNLOAD_FILE
::
# function: download/fetch file from storage server
# request body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file offset
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: download file bytes
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ file content
* STORAGE_PROTO_CMD_GET_METADATA
::
# function: get meta data from storage server
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body
@ meta data buff, each meta data separated by \x01, name and value separated by \x02
* STORAGE_PROTO_CMD_QUERY_FILE_INFO
::
# function: query file info from storage server
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file size
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file create timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file CRC32 signature

24
doc/en/index.rst Normal file
View File

@ -0,0 +1,24 @@
.. FastDFS documentation master file, created by
sphinx-quickstart on Wed May 4 10:59:19 2016.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to FastDFS's documentation!
===================================
Contents:
.. toctree::
:maxdepth: 2
fastdfs/introduction
fastdfs/install
fastdfs/protocol
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

263
doc/en/make.bat Normal file
View File

@ -0,0 +1,263 @@
@ECHO OFF
REM Command file for Sphinx documentation
REM Usage: make.bat <target>  (run with no argument or "help" for the target list)

REM Default to the sphinx-build found on PATH unless SPHINXBUILD is preset.
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
REM Build output directory and the option sets handed to every builder.
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
REM PAPER (a4 or letter) only influences the LaTeX-based builders.
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
REM With no argument, show the help screen.
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview over all changed/added/deprecated items
echo. xml to make Docutils-native XML files
echo. pseudoxml to make pseudoxml-XML files for display purposes
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
echo. coverage to run coverage check of the documentation if enabled
goto end
)
REM Remove everything below the build directory (subdirs first, then files).
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
REM Check if sphinx-build is available and fallback to Python version if any
REM (exit code 9009 is cmd.exe's "command not found").
%SPHINXBUILD% 2> nul
if errorlevel 9009 goto sphinx_python
goto sphinx_ok
:sphinx_python
REM sphinx-build not on PATH: retry by invoking the Python module directly.
set SPHINXBUILD=python -m sphinx.__init__
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
:sphinx_ok
REM One branch per Sphinx builder; each jumps to :end when finished.
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\FastDFS.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\FastDFS.ghc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
REM The latexpdf targets also run the generated Makefile in %BUILDDIR%/latex.
if "%1" == "latexpdf" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf
cd %~dp0
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdfja" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf-ja
cd %~dp0
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
REM gettext uses the i18n option set (separate doctrees).
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
if "%1" == "coverage" (
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
if errorlevel 1 exit /b 1
echo.
echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
goto end
)
if "%1" == "xml" (
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The XML files are in %BUILDDIR%/xml.
goto end
)
if "%1" == "pseudoxml" (
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end
)
:end

192
doc/zh/Makefile Normal file
View File

@ -0,0 +1,192 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

# Every command-style target is phony so a stray file with the same name
# cannot shadow it.  (applehelp, latexpdfja, texinfo, info, xml and
# pseudoxml were previously missing from this list.)
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp applehelp devhelp epub latex latexpdf latexpdfja text man texinfo info gettext changes xml pseudoxml linkcheck doctest coverage

# Print the list of available builders.
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo " html to make standalone HTML files"
	@echo " dirhtml to make HTML files named index.html in directories"
	@echo " singlehtml to make a single large HTML file"
	@echo " pickle to make pickle files"
	@echo " json to make JSON files"
	@echo " htmlhelp to make HTML files and a HTML help project"
	@echo " qthelp to make HTML files and a qthelp project"
	@echo " applehelp to make an Apple Help Book"
	@echo " devhelp to make HTML files and a Devhelp project"
	@echo " epub to make an epub"
	@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo " latexpdf to make LaTeX files and run them through pdflatex"
	@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo " text to make text files"
	@echo " man to make manual pages"
	@echo " texinfo to make Texinfo files"
	@echo " info to make Texinfo files and run them through makeinfo"
	@echo " gettext to make PO message catalogs"
	@echo " changes to make an overview of all changed/added/deprecated items"
	@echo " xml to make Docutils-native XML files"
	@echo " pseudoxml to make pseudoxml-XML files for display purposes"
	@echo " linkcheck to check all external links for integrity"
	@echo " doctest to run all doctests embedded in the documentation (if enabled)"
	@echo " coverage to run coverage check of the documentation (if enabled)"

# Remove all build products (doctrees included).
clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	  ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	  ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FastDFS.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FastDFS.qhc"

applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	  "~/Library/Documentation/Help or install it in your application" \
	  "bundle."

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/FastDFS"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FastDFS"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	  "(use \`make latexpdf' here to do that automatically)."

# The pdf targets delegate to the Makefile Sphinx generated in $(BUILDDIR)/latex.
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	  "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	# $(MAKE) (not literal `make`) so -j/-n and the jobserver reach the sub-make
	$(MAKE) -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

# gettext uses the i18n option set: it cannot share doctrees with the others.
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	  "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	  "results in $(BUILDDIR)/doctest/output.txt."

coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	  "results in $(BUILDDIR)/coverage/python.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

358
doc/zh/conf.py Normal file
View File

@ -0,0 +1,358 @@
# -*- coding: utf-8 -*-
#
# FastDFS documentation build configuration file, created by
# sphinx-quickstart on Wed May 4 10:58:17 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FastDFS'
copyright = u'2016, Copyright (C) 2008 Happy Fish / YuQing'
author = u'Copyright (C) 2008 Happy Fish / YuQing'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.0.8'
# The full version, including alpha/beta/rc tags.
release = '5.0.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# NOTE(review): `sphinx_rtd_theme` is not imported in this chunk; presumably
# imported near the top of conf.py — confirm, otherwise this line raises NameError.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'FastDFSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# NOTE(review): master_doc, project, author and copyright are defined earlier
# in this conf.py, outside this chunk.
latex_documents = [
  (master_doc, 'FastDFS.tex', u'FastDFS Documentation',
   u'Copyright (C) 2008 Happy Fish / YuQing', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'fastdfs', u'FastDFS Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  (master_doc, 'FastDFS', u'FastDFS Documentation',
   author, 'FastDFS', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True

View File

@ -0,0 +1,48 @@
Introduction
=======================
Copyright (C) 2008 Happy Fish / YuQing
FastDFS may be copied only under the terms of the GNU General
Public License V3, which may be found in the FastDFS source kit.
Please visit the FastDFS Home Page for more detail.
* English language: http://english.csource.org/
* Chinese language: http://www.csource.org/
FastDFS is an open source, high performance distributed file system. Its major
functions include: file storing, file syncing and file accessing (file uploading
and file downloading), and it can resolve the high capacity and load balancing
problem. FastDFS should meet the requirements of a website whose service is based
on files, such as a photo sharing site or a video sharing site.
FastDFS has two roles: tracker and storage. The tracker takes charge of
scheduling and load balancing for file access. The storage stores files, and its
function is file management including: file storing, file syncing, and providing a
file access interface. It also manages the metadata, which are attributes of the
file represented as key-value pairs. For example: width=1024, where the key is
"width" and the value is "1024".
The tracker and storage contain one or more servers. The servers in the tracker
or storage cluster can be added to or removed from the cluster at any time without
affecting the online services. The servers in the tracker cluster are peers.
The storage servers are organized by file volume/group to obtain high capacity.
The storage system contains one or more volumes whose files are independent among
these volumes. The capacity of the whole storage system equals the sum of all
volumes' capacity. A file volume contains one or more storage servers whose files
are the same among these servers. The servers in a file volume back up each other,
and all these servers are load balanced. When adding a storage server to a
volume, files already existing in this volume are replicated to the new server
automatically, and when this replication is done, the system will switch the
server online to provide storage services.
When the whole storage capacity is insufficient, you can add one or more
volumes to expand the storage capacity. To do this, you need to add one or
more storage servers.
The identification of a file is composed of two parts: the volume name and
the file name.
For client test code that uses the client library, please refer to the directory: client/test.

View File

@ -0,0 +1,114 @@
Benchmark
=======================
测试方案及测试结果
一、测试方案
-----------------------
服务器配置情况如下: ::
1台test server 2CPU (Intel Xeon 2.80GHz), 2GB RAM, 80GB Disk
1台tracker server2CPU (Intel Xeon 2.80GHz), 2GB RAM, 80GB Disk
4台storage server2CPU (Intel Xeon 2.80GHz), 4GB RAM, 1TB Disk (2 * 512GB SATA)
测试环境的带宽为1G服务器直接接到千兆交换机上服务器之间通信时互不影响。
存储节点分为2个组每组2台服务器。
测试文件大小及数量:
========== ==========
文件大小 文件数量
========== ==========
5KB 100万个
50KB 200万个
200KB 100万个
1MB 20万个
10MB 2万个
100MB 1千个
========== ==========
每种类型的文件只要一个即可采用重复上传的方式。比如5KB的一个文件重复上传100万次。
和平台的连接采用短连接方式。
测试文件内容中的每个字节值为随机数文件内容直接存放在内存中通过API直接把文件内容上传到存储平台。
上传和下载文件时采用随机的方法。以文件总数量为准进行随机抽选。上传文件时对于某种类型的文件如5KB类型如 果已经达到了上传的文件数量如100万个则 不再上传。
需要进行并发测试,采用进程方式,每个进程各自记录日志文件。
* Upload10个并发进程需要记录每个进程运行花费的时间需要在日志文件中记录上传文件ID及文件类型
* Download20个并发进程共运行8小时需要记录下载完成数量和成功数量。下载下来的文件直接舍弃不需要保存到磁盘也不需要保存到内存
* Delete10个并发进程需要记录每个进程运行花费的时间
需要记录两个纬度的统计信息:
1. 按storage server统计记录的字段有ip地址、操作总次数、成功次数
2. 按文件类型 统计:文件类型、操作总次数、成功次数
操作分别为upload、download和delete
文件类型分别为5KB、50KB、200KB、1MB等等
二、测试结果
-----------------------
存储节点的2块磁盘采用了LVM做了RAID0没有使用RAID设备磁盘的IO较差。写了一个小的测试程序直接write 1GB的文件每次写1MB磁盘IO只能达到3MB多一点。
1. Upload ::
total_count=4221000, success_count=4221000, success ratio: 100.00% time_used=118369s, avg time used: 28ms, QPS=35.66
file_type total_count success_count time_used(s) avg(ms) QPS success_ratio
5K 1000000 1000000 25178 25 39.72 100.00
50K 2000000 2000000 52328 26 38.22 100.00
200K 1000000 1000000 30177 30 33.14 100.00
1M 200000 200000 7397 36 27.04 100.00
10M 20000 20000 2223 111 9.00 100.00
100M 1000 1000 861 861 1.16 100.00
IO speed = 6902 KB
ip_addr total_count success_count time_used(s) avg(ms) QPS success_ratio
119.42.240.98 1055253 1055253 30797 29 34.26 100.00
119.42.240.103 1055246 1055246 30144 28 35.01 100.00
119.42.240.99 1055262 1055262 29855 28 35.35 100.00
119.42.240.100 1055239 1055239 27368 25 38.56 100.00
2. Download ::
total_count=1142383, success_count=1142383, success ratio: 100.00% time_used=28800s, avg time used: 25ms, QPS=39.67
file_type total_count success_count time_used(s) avg(ms) QPS success_ratio
5K 268491 268491 3822 14 70.25 100.00
50K 542050 542050 9937 18 54.55 100.00
200K 271837 271837 6937 25 39.19 100.00
1M 54389 54389 3450 63 15.76 100.00
10M 5283 5283 2866 542 1.84 100.00
100M 333 333 1770 5317 0.19 100.00
IO speed = 7871 KB
ip_addr total_count success_count time_used(s) avg(ms) QPS success_ratio
119.42.240.100 284907 284907 7076 24 40.26 100.00
119.42.240.103 284901 284901 6479 22 43.97 100.00
119.42.240.98 286288 286288 8359 29 34.25 100.00
119.42.240.99 286287 286287 6870 23 41.67 100.00
3. Delete ::
total_count=4221000, success_count=4221000, success ratio: 100.00% time_used=6565s, avg time used: 1ms, QPS=642.96
file_type total_count success_count time_used(s) avg(ms) QPS success_ratio
5K 1000000 1000000 337 0 2967.36 100.00
50K 2000000 2000000 3588 1 557.41 100.00
200K 1000000 1000000 1847 1 541.42 100.00
1M 200000 200000 378 1 529.10 100.00
10M 20000 20000 135 6 148.15 100.00
100M 1000 1000 51 51 19.61 100.00
ip_addr total_count success_count time_used(s) avg(ms) QPS success_ratio
119.42.240.98 1055253 1055253 1626 1 648.99 100.00
119.42.240.103 1055246 1055246 1542 1 684.34 100.00
119.42.240.99 1055262 1055262 1620 1 651.40 100.00
119.42.240.100 1055239 1055239 1550 1 680.80 100.00

231
doc/zh/fastdfs/faq.rst Normal file
View File

@ -0,0 +1,231 @@
FAQ
============
定位问题首先要看日志文件。出现问题时,先检查返回的错误号和错误信息。然后查看服务器端日志,相信可以定位到问题所在。
1. FastDFS适用的场景以及不适用的场景
FastDFS是为互联网应用量身定做的一套分布式文件存储系统非常适合用来存储用户图片、视频、文档等文件。对于互联网应用和其他分布式文件系统相比优势非常明显。
具体情况大家可以看相关的介绍文档包括FastDFS介绍PPT等等。
出于简洁考虑FastDFS没有对文件做分块存储因此不太适合分布式计算场景。
2. FastDFS需要的编译和运行环境是怎样的
FastDFS Server仅支持unix系统在Linux和FreeBSD测试通过。在Solaris系统下网络通信方面有些问题。
编译需要的其他库文件有pthreadV5.0以前的版本依赖libeventV5.0以后不再依赖libevent。
v5.04开始依赖libfastcommongithub地址https://github.com/happyfish100/libfastcommon
v5版本从v5.05开始才是稳定版本请使用v5版本的同学尽快升级到v5.05或更新的版本建议升级到v5.08。
pthread使用系统自带的即可。
对libevent的版本要求为1.4.x建议使用最新的stable版本如1.4.14b。
注意千万不要使用libevent 2.0非stable版本。
测试了一下libevent 2.0.10是可以正常工作的。
在64位系统下可能需要自己在/usr/lib64下创建libevent.so的符号链接。比如
ln -s /usr/lib/libevent.so /usr/lib64/libevent.so
在ubuntu 11及后续版本可能会出现找不到动态库pthread库解决方法参见http://bbs.chinaunix.net/thread-2324388-1-2.html
若出现libfastcommon版本不匹配问题请执行如下命令/bin/rm -rf /usr/local/lib/libfastcommon.so /usr/local/include/fastcommon
3. 有人在生产环境中使用FastDFS吗
答案是肯定的。据我所知截止2012年底至少有25家公司在使用FastDFS其中有好几家是做网盘的公司。
其中存储量最大的一家集群中存储group数有400个存储服务器超过800台存储容量达到6PB文件数超过1亿Group持续增长中。。。
以下是使用FastDFS的用户列表
#. 某大型网盘因对方要求对公司名保密就不提供名字了。有400个group存储容量达到了6PB文件数超过1亿
#. UC http://www.uc.cn/存储容量超过10TB
#. 支付宝http://www.alipay.com/
#. 京东商城http://www.360buy.com/
#. 淘淘搜http://www.taotaosou.com/
#. 飞信http://feixin.1008**/
#. 赶集网http://www.ganji.com/
#. 淘米网http://www.61.com/
#. 迅雷http://www.xunlei.com/
#. 蚂蜂窝http://www.mafengwo.cn/
#. 丫丫网http://www.iyaya.com/
#. 虹网http://3g.ahong.com
#. 5173http://www.5173.com/
#. 华夏原创网http://www.yuanchuang.com/
#. 华师京城教育云平台http://www.hsjdy.com.cn/
#. 视友网http://www.cuctv.com/
#. 搜道网http://www.sodao.com/
#. 58同城http://www.58.com/
#. 商务联盟网http://www.biz72.com/
#. 中青网http://www.youth.cn/
#. 缤丽网 http://www.binliy.com/
#. 飞视云视频http://www.freeovp.com/
#. 梦芭莎http://www.moonbasa.com/
#. 活动帮http://www.eventsboom.com
#. 51CTOhttp://www.51cto.com/
#. 搜房网http://www.soufun.com/
4. 启动storage server时一直处于僵死状态。
A启动storage serverstorage将连接tracker server如果连不上将一直重试。直到连接成功启动才算真正完成。
出现这样情况请检查连接不上tracker server的原因。
友情提示从V2.03以后多tracker server在启动时会做时间上的检测判断是否需要从别的tracker server同步4个系统文件。
触发时机是第一个storage server连接上tracker server后并发起join请求。
如果集群中有2台tracker server而其中一台tracker没有启动可能会导致storage server一直处于僵死状态。
这个问题v5.07解决了。
5. 执行fdfs_test或fdfs_test1上传文件时服务器返回错误号2
错误号表示没有ACTIVE状态的storage server。可以执行fdfs_monitor查看服务器状态。
6. 如何让server进程退出运行
直接kill即可让server进程正常退出可以使用killall命令例如 ::
killall fdfs_trackerd
killall fdfs_storaged
也可以使用如下命令: ::
/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf stop
/usr/bin/fdfs_storaged /etc/fdfs/storage.conf stop
千万不要使用-9参数强杀否则可能会导致binlog数据丢失的问题。
7. 如何重启server进程
直接使用: ::
/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf restart
/usr/bin/fdfs_storaged /etc/fdfs/storage.conf restart
8. 跨运营商通信异常问题
比如电信和网通机房相互通信,可能会存在异常,有两种表现:
#. 不能建立连接,这个比较直接,肯定是网络连接的问题
#. 可以正常建立连接,但接收和发送数据失败,这个问题比较隐蔽,正常网络环境下,不应该出现此类问题。
还有人碰到过从一个方向建立连接可以正常通信,但从另外一个方向就不能正常通信的情况。
解决办法:
尝试将服务端口改小建议将端口修改为1024以下。比如将storage服务端口由23000修改为873等也可以试试修改为8080
如果问题还不能解决,请联系你的网络(机房)服务商。
9. fdfs_test和fdfs_test1是做什么用的
这两个是FastDFS自带的测试程序会对一个文件上传两次分别作为主文件和从文件。返回的文件ID也是两个。
并且会上传文件附加属性storage server上会生成4个文件。
这两个程序仅用于测试目的,请不要用作实际用途。
V2.05提供了比较正式的三个小工具:
* 上传文件:/usr/bin/fdfs_upload_file <config_file> <local_filename>
* 下载文件:/usr/bin/fdfs_download_file <config_file> <file_id> [local_filename]
* 删除文件:/usr/bin/fdfs_delete_file <config_file> <file_id>
10. 什么是主从文件?
主从文件是指文件ID有关联的文件一个主文件可以对应多个从文件。
::
主文件ID = 主文件名 + 主文件扩展名
从文件ID = 主文件名 + 从文件后缀名 + 从文件扩展名
使用主从文件的一个典型例子:以图片为例,主文件为原始图片,从文件为该图片的一张或多张缩略图。
FastDFS中的主从文件只是在文件ID上有联系。FastDFS server端没有记录主从文件对应关系因此删除主文件FastDFS不会自动删除从文件。
删除主文件后,从文件的级联删除,需要由应用端来实现。
主文件及其从文件均存放到同一个group中。
主从文件的生成顺序:
#. 先上传主文件如原文件得到主文件ID
#. 然后上传从文件如缩略图指定主文件ID和从文件后缀名当然还可以同时指定从文件扩展名得到从文件ID。
11. 如何删除无效的storage server
可以使用fdfs_monitor来删除。命令行如下
::
/usr/bin/fdfs_monitor <config_filename> delete <group_name> <storage_id>
例如:
::
/usr/bin/fdfs_monitor /etc/fdfs/client.conf delete group1 192.168.0.100
注意如果被删除的storage server的状态是ACTIVE也就是该storage server还在线上服务的情况下是无法删除掉的。
storage_id参数如果使用默认的ip方式填写storage server IP地址否则使用对应的server id。
12. FastDFS扩展模块升级到V1.06及以上版本的注意事项
apache和nginx扩展模块版本v1.06及以上版本,需要在配置文件/etc/fdfs/fastdfs_mod.conf中设置storage server的存储路径信息。
一个示例如下所示:
::
store_path_count=1
store_path0=/home/yuqing/fastdfs
#store_path_count和store_path
#均需要正确设置必须和storage.conf中的相应配置完全一致否则将导致文件不能正确下载
13. nginx和apache扩展模块与FastDFS server版本对应关系
扩展模块1.05 针对FastDFs server v2.x要求server版本大于等于v2.09
扩展模块1.07及以上版本: 针对FastDFs server v3.x
具体的版本匹配情况参阅扩展模块源码下的HISTORY文件
14. FastDFS有QQ技术交流群吗
有的。群号164684842欢迎大家加入交流。
15. 上传文件失败返回错误码28这是怎么回事
返回错误码28表示磁盘空间不足。注意FastDFS中有预留空间的概念在tracker.conf中设置配置项为reserved_storage_space缺省值为4GB即预留4GB的空间。
请酌情设置reserved_storage_space这个参数比如可以设置为磁盘总空间的20%左右。
16. fdfs_trackerd或者fdfs_storaged的日志中出现malloc task buff failed字样的错误这是怎么回事
出现此类信息表示已经达到最大连接数。server端支持的最大连接数可以通过max_connections这个参数来设置。
出现这样的问题,需要排查一下是否客户端使用不当导致的,比如客户端没有及时关闭无用的连接。
17. FastDFS的文件ID中可以反解出哪些字段
文件ID中除了包含group name和存储路径外文件名中可以反解出如下几个字段
#. 文件创建时间unix时间戳32位整数
#. 文件大小
#. 上传到的源storage server IP地址32位整数
#. 文件crc32校验码
#. 随机数(这个字段用来避免文件重名)
18. 为什么生成的token验证无法通过
出现这样的问题,请进行如下两项检查:
#. 确认调用token生成函数传递的文件ID中没有包含group name。传递的文件ID格式形如M00/00/1B/wKgnVE84utyOG9hEAAATz5-S0SI99.java
#. 确认服务器时间基本是一致的,注意服务器时间不能相差太多,不要相差到分钟级别。
19. 最新程序包的下载地址是什么?
因google code不支持上传程序包最新的程序包可以在sourceforge上下载下载地址https://sourceforge.net/projects/fastdfs/files/
20. FastDFS支持断点续传吗
可以支持。先上传appender类型的文件然后使用append函数。

164
doc/zh/fastdfs/howto.rst Normal file
View File

@ -0,0 +1,164 @@
How to
=======================
文件及目录结构
-----------------------
FastDFS服务器端运行时目录结构如下
::
${base_path}
|__data存放数据文件
|__logs存放日志文件
其中,${base_path}由配置文件中的参数“base_path”设定。
一、tracker server
^^^^^^^^^^^^^^^^^^^^^^^
tracker server目录及文件结构
::
${base_path}
|__data
| |__storage_groups.dat存储分组信息
| |__storage_servers.dat存储服务器列表
|__logs
|__trackerd.logtracker server日志文件
数据文件storage_groups.dat和storage_servers.dat中的记录之间以换行符\n分隔字段之间以西文逗号,)分隔。
storage_groups.dat中的字段依次为
1. group_name组名
2. storage_portstorage server端口号
storage_servers.dat中记录storage server相关信息字段依次为
1. group_name所属组名
2. ip_addrip地址
3. status状态
4. sync_src_ip_addr向该storage server同步已有数据文件的源服务器
5. sync_until_timestamp同步已有数据文件的截至时间UNIX时间戳
6. stat.total_upload_count上传文件次数
7. stat.success_upload_count成功上传文件次数
8. stat.total_set_meta_count更改meta data次数
9. stat.success_set_meta_count成功更改meta data次数
10. stat.total_delete_count删除文件次数
11. stat.success_delete_count成功删除文件次数
12. stat.total_download_count下载文件次数
13. stat.success_download_count成功下载文件次数
14. stat.total_get_meta_count获取meta data次数
15. stat.success_get_meta_count成功获取meta data次数
16. stat.last_source_update最近一次源头更新时间更新操作来自客户端
17. stat.last_sync_update最近一次同步更新时间更新操作来自其他storage server的同步
二、storage server
^^^^^^^^^^^^^^^^^^^^^^^
storage server目录及文件结构
::
${base_path}
|__data
| |__.data_init_flag当前storage server初始化信息
| |__storage_stat.dat当前storage server统计信息
| |__sync存放数据同步相关文件
| | |__binlog.index当前的binlog更新操作日志文件索引号
| | |__binlog.###:存放更新操作记录(日志)
| | |__${ip_addr}_${port}.mark存放向目标服务器同步的完成情况
| |
| |__一级目录256个存放数据文件的目录目录名为十六进制字符00, 1F
| |__二级目录256个存放数据文件的目录目录名为十六进制字符0A, CF
|__logs
|__storaged.logstorage server日志文件
.data_init_flag文件格式为ini配置文件方式各个参数如下
::
# storage_join_time本storage server创建时间
# sync_old_done本storage server是否已完成同步的标志源服务器向本服务器同步已有数据
# sync_src_server向本服务器同步已有数据的源服务器IP地址没有则为空
# sync_until_timestamp同步已有数据文件截至时间UNIX时间戳
storage_stat.dat文件格式为ini配置文件方式各个参数如下
::
# total_upload_count上传文件次数
# success_upload_count成功上传文件次数
# total_set_meta_count更改meta data次数
# success_set_meta_count成功更改meta data次数
# total_delete_count删除文件次数
# success_delete_count成功删除文件次数
# total_download_count下载文件次数
# success_download_count成功下载文件次数
# total_get_meta_count获取meta data次数
# success_get_meta_count成功获取meta data次数
# last_source_update最近一次源头更新时间更新操作来自客户端
# last_sync_update最近一次同步更新时间更新操作来自其他storage server
binlog.index中只有一个数据项当前binlog的文件索引号
binlog.######为索引号对应的3位十进制字符不足三位前面补0。索引号基于0最大为999。一个binlog文件最大为1GB。记录之间以换行符\n分隔字段之间以西文空格分隔。字段依次为
1. timestamp更新发生时间Unix时间戳
2. op_type操作类型一个字符
3. filename操作更新的文件名包括相对路径5A/3D/VKQ-CkpWmo0AAAAAAKqTJj0eiic6891.a
${ip_addr}_${port}.markip_addr为同步的目标服务器IP地址port为本组storage server端口。例如10.0.0.1_23000.mark。文件格式为ini配置文件方式各个参数如下
::
# binlog_index已处理同步到的binlog索引号
# binlog_offset已处理同步到的binlog文件偏移量字节数
# need_sync_old同步已有数据文件标记0表示没有数据文件需要同步
# sync_old_done同步已有数据文件是否完成标记0表示未完成1表示已完成
# until_timestamp同步已有数据截至时间点UNIX时间戳
# scan_row_count已扫描的binlog记录数
# sync_row_count已同步的binlog记录数
数据文件名由系统自动生成包括5部分存储服务器IP地址、当前时间Unix时间戳、文件大小字节数、随机数和文件后缀。文件名长度为33字节。文件可以按目录顺序存放也可以按照PJW Hash算法hash到65536256*256个目录中分散存储通过配置文件控制。
同步机制
-----------------------
在FastDFS的服务器端配置文件中bind_addr这个参数用于需要绑定本机IP地址的场合。只有这个参数和主机特征相关其余参数都是可以统一配置的。在不需要绑定本机的情况下为了便于管理和维护建议所有tracker server的配置文件相同同组内的所有storage server的配置文件相同。
tracker server的配置文件中没有出现storage server而storage server的配置文件中会列举出所有的tracker server。这就决定了storage server和tracker server之间的连接由storage server主动发起storage server为每个tracker server启动一个线程进行连接和通讯这部分的通信协议请参阅
《FastDFS HOWTO -- Protocol》
中的“2. storage server to tracker server command”部分。
tracker server会在内存中保存storage分组及各个组下的storage server并将连接过自己的storage server及其分组保存到文件中以便下次重启服务时能直接从本地磁盘中获得storage相关信息。storage server会在内存中记录本组的所有服务器并将服务器信息记录到文件中。tracker server和storage server之间相互同步storage server列表
1. 如果一个组内增加了新的storage server或者storage server的状态发生了改变tracker server都会将storage server列表同步给该组内的所有storage server。以新增storage server为例因为新加入的storage server主动连接tracker servertracker server发现有新的storage server加入就会将该组内所有的storage server返回给新加入的storage server并重新将该组的storage server列表返回给该组内的其他storage server
2. 如果新增加一台tracker serverstorage server连接该tracker server发现该tracker server返回的本组storage server列表比本机记录的要少就会将该tracker server上没有的storage server同步给该tracker server。
同一组内的storage server之间是对等的文件上传、删除等操作可以在任意一台storage server上进行。文件同步只在同组内的storage server之间进行采用push方式即源服务器同步给目标服务器。以文件上传为例假设一个组内有3台storage server A、B和C文件F上传到服务器B由B将文件F同步到其余的两台服务器A和C。我们不妨把文件F上传到服务器B的操作为源头操作在服务器B上的F文件为源头数据文件F被同步到服务器A和C的操作为备份操作在A和C上的F文件为备份数据。同步规则总结如下
1. 只在本组内的storage server之间进行同步
2. 源头数据才需要同步,备份数据不需要再次同步,否则就构成环路了;
3. 上述第二条规则有个例外就是新增加一台storage server时由已有的一台storage server将已有的所有数据包括源头数据和备份数据同步给该新增服务器。
storage server有7个状态如下
::
# FDFS_STORAGE_STATUS_INIT :初始化,尚未得到同步已有数据的源服务器
# FDFS_STORAGE_STATUS_WAIT_SYNC :等待同步,已得到同步已有数据的源服务器
# FDFS_STORAGE_STATUS_SYNCING :同步中
# FDFS_STORAGE_STATUS_DELETED :已删除,该服务器从本组中摘除(注:本状态的功能尚未实现)
# FDFS_STORAGE_STATUS_OFFLINE :离线
# FDFS_STORAGE_STATUS_ONLINE :在线,尚不能提供服务
# FDFS_STORAGE_STATUS_ACTIVE :在线,可以提供服务
当storage server的状态为FDFS_STORAGE_STATUS_ONLINE时当该storage server向tracker server发起一次heart beat时tracker server将其状态更改为FDFS_STORAGE_STATUS_ACTIVE。
组内新增加一台storage server A时由系统自动完成已有数据同步处理逻辑如下
1. storage server A连接tracker servertracker server将storage server A的状态设置为FDFS_STORAGE_STATUS_INIT。storage server A询问追加同步的源服务器和追加同步截至时间点如果该组内只有storage server A或该组内已成功上传的文件数为0则没有数据需要同步storage server A就可以提供在线服务此时tracker将其状态设置为FDFS_STORAGE_STATUS_ONLINE否则tracker server将其状态设置为FDFS_STORAGE_STATUS_WAIT_SYNC进入第二步的处理
2. 假设tracker server分配向storage server A同步已有数据的源storage server为B。同组的storage server和tracker server通讯得知新增了storage server A将启动同步线程并向tracker server询问向storage server A追加同步的源服务器和截至时间点。storage server B将把截至时间点之前的所有数据同步给storage server A而其余的storage server从截至时间点之后进行正常同步只把源头数据同步给storage server A。到了截至时间点之后storage server B对storage server A的同步将由追加同步切换为正常同步只同步源头数据
3. storage server B向storage server A同步完所有数据暂时没有数据要同步时storage server B请求tracker server将storage server A的状态设置为FDFS_STORAGE_STATUS_ONLINE
4. 当storage server A向tracker server发起heart beat时tracker server将其状态更改为FDFS_STORAGE_STATUS_ACTIVE。

318
doc/zh/fastdfs/install.rst Normal file
View File

@ -0,0 +1,318 @@
Installation
======================
Copyright 2009 Happy Fish / YuQing
FastDFS may be copied only under the terms of the GNU General
Public License V3, which may be found in the FastDFS source kit.
Please visit the FastDFS Home Page for more detail.
* English language: http://english.csource.org/
* Chinese language: http://www.csource.org/
Installing FastDFS from Source
--------------------------------
step 1. download libfastcommon source package from github and install it
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
the github address:
https://github.com/happyfish100/libfastcommon.git
step 2. download FastDFS source package and unpack it,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
tar xzf FastDFS_v5.x.tar.gz
for example:
::
tar xzf FastDFS_v5.08.tar.gz
step 3. enter the FastDFS dir
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
cd FastDFS
step 4. execute
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
./make.sh
step 5. make install
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
./make.sh install
step 6. edit/modify the config file of tracker and storage
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
step 7. run server programs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
start the *tracker* server:
::
/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf restart
in Linux, you can start fdfs_trackerd as a service:
::
/sbin/service fdfs_trackerd start
start the *storage* server:
::
/usr/bin/fdfs_storaged /etc/fdfs/storage.conf restart
in Linux, you can start fdfs_storaged as a service:
::
/sbin/service fdfs_storaged start
step 8. run test program
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
run the client test program:
::
/usr/bin/fdfs_test <client_conf_filename> <operation>
/usr/bin/fdfs_test1 <client_conf_filename> <operation>
for example, upload a file:
::
/usr/bin/fdfs_test conf/client.conf upload /usr/include/stdlib.h
step 9. run monitor program
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
run the monitor program:
::
/usr/bin/fdfs_monitor <client_conf_filename>
tracker server config file sample please see conf/tracker.conf
storage server config file sample please see conf/storage.conf
client config file sample please see conf/client.conf
Item detail
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
1. server common items
+-----------------------+--------+---------+------+
| item name | type | default | Must |
+=======================+========+=========+======+
| base_path | string | | Y |
+-----------------------+--------+---------+------+
| disabled | boolean| false | N |
+-----------------------+--------+---------+------+
| bind_addr | string | | N |
+-----------------------+--------+---------+------+
| network_timeout | int | 30(s) | N |
+-----------------------+--------+---------+------+
| max_connections | int | 256 | N |
+-----------------------+--------+---------+------+
| log_level | string | info | N |
+-----------------------+--------+---------+------+
| run_by_group | string | | N |
+-----------------------+--------+---------+------+
| run_by_user | string | | N |
+-----------------------+--------+---------+------+
| allow_hosts | string | \* | N |
+-----------------------+--------+---------+------+
| sync_log_buff_interval| int | 10(s) | N |
+-----------------------+--------+---------+------+
| thread_stack_size | string | 1M | N |
+-----------------------+--------+---------+------+
memo::
* base_path is the base path of sub dirs:
data and logs. base_path must exist and it's sub dirs will
be automatically created if not exist.
$base_path/data: store data files
$base_path/logs: store log files
* log_level is the standard log level as syslog, case insensitive
# emerg: for emergency
# alert
# crit: for critical
# error
# warn: for warning
# notice
# info
# debug
* allow_hosts can occur more than once, host can be hostname or ip address,
"*" means match all ip addresses, can use range like this: 10.0.1.[1-15,20]
or host[01-08,20-25].domain.com, for example:
allow_hosts=10.0.1.[1-15,20]
allow_hosts=host[01-08,20-25].domain.com
2. tracker server items
+-----------------------+--------+---------+------+
| item name | type | default | Must |
+=======================+========+=========+======+
| port | int | 22000 | N |
+-----------------------+--------+---------+------+
| store_lookup | int | 0 | N |
+-----------------------+--------+---------+------+
| store_group | string | | N |
+-----------------------+--------+---------+------+
| store_server | int | 0 | N |
+-----------------------+--------+---------+------+
| store_path | int | 0 | N |
+-----------------------+--------+---------+------+
| download_server | int | 0 | N |
+-----------------------+--------+---------+------+
| reserved_storage_space| string | 1GB | N |
+-----------------------+--------+---------+------+
memo::
* the value of store_lookup is:
0: round robin (default)
1: specify group
2: load balance (supported since V1.1)
* store_group is the name of group to store files.
when store_lookup set to 1(specify group),
store_group must be set to a specified group name.
* reserved_storage_space is the reserved storage space for system
or other applications. if the free(available) space of any storage
server in a group <= reserved_storage_space, no file can be uploaded
to this group (since V1.1)
bytes unit can be one of follows:
# G or g for gigabyte(GB)
# M or m for megabyte(MB)
# K or k for kilobyte(KB)
# no unit for byte(B)
3. storage server items
+--------------------------+--------+---------+------+
| item name | type | default | Must |
+==========================+========+=========+======+
| group_name | string | | Y |
+--------------------------+--------+---------+------+
| tracker_server | string | | Y |
+--------------------------+--------+---------+------+
| port | int | 23000 | N |
+--------------------------+--------+---------+------+
| heart_beat_interval | int | 30(s) | N |
+--------------------------+--------+---------+------+
| stat_report_interval | int | 300(s) | N |
+--------------------------+--------+---------+------+
| sync_wait_msec | int | 100(ms) | N |
+--------------------------+--------+---------+------+
| sync_interval | int | 0(ms) | N |
+--------------------------+--------+---------+------+
| sync_start_time | string | 00:00 | N |
+--------------------------+--------+---------+------+
| sync_end_time | string | 23:59 | N |
+--------------------------+--------+---------+------+
| store_path_count | int | 1 | N |
+--------------------------+--------+---------+------+
| store_path0 | string |base_path| N |
+--------------------------+--------+---------+------+
| store_path# | string | | N |
+--------------------------+--------+---------+------+
|subdir_count_per_path | int | 256 | N |
+--------------------------+--------+---------+------+
|check_file_duplicate | boolean| 0 | N |
+--------------------------+--------+---------+------+
| key_namespace | string | | N |
+--------------------------+--------+---------+------+
| keep_alive | boolean| 0 | N |
+--------------------------+--------+---------+------+
| sync_binlog_buff_interval| int | 60s | N |
+--------------------------+--------+---------+------+
memo::
* tracker_server can occur more than once, and tracker_server format is
"host:port", host can be hostname or ip address.
* store_path#, # for digital, based 0
* check_file_duplicate: when set to true, must work with FastDHT server,
more detail please see INSTALL of FastDHT. FastDHT download page:
http://code.google.com/p/fastdht/downloads/list
* key_namespace: FastDHT key namespace, can't be empty when
check_file_duplicate is true. the key namespace should be as short as possible
How to create RPM packages
---------------------------
.. code-block:: shell
yum -y install gcc rpm-build git re2c php php-cli php-devel
.. code-block:: bash
#!/usr/bin/env bash
set -eu
mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
if [ ! -d /opt/src/fastdfs/build ]; then
mkdir -p /opt/src/fastdfs/build
cd /opt/src/fastdfs
git clone https://github.com/happyfish100/fastdfs.git
git clone https://github.com/happyfish100/libfastcommon.git
fi
cd /opt/src/fastdfs
# fastdfs build require libfastcommon
rsync -az --exclude=.git --delete /opt/src/fastdfs/libfastcommon/ /opt/src/fastdfs/build/libfastcommon-1.0.27/
# fastdfs tracker and storage
rsync -az --exclude=.git --delete /opt/src/fastdfs/fastdfs/ /opt/src/fastdfs/build/fastdfs-5.0.8/
# fastdfs_client for php extension
rsync -az --exclude=.git --delete /opt/src/fastdfs/fastdfs/php_client/ /opt/src/fastdfs/build/fastdfs_client-5.0.8/
cd /opt/src/fastdfs/build
tar zcf fastdfs-5.0.8.tar.gz fastdfs-5.0.8/
tar zcf fastdfs_client-5.0.8.tar.gz fastdfs_client-5.0.8/
tar zcf libfastcommon-1.0.27.tar.gz libfastcommon-1.0.27/
cp -f *.tar.gz ~/rpmbuild/SOURCES/
cp -f fastdfs-5.0.8/fastdfs.spec ~/rpmbuild/SPECS/fastdfs.spec
cp -f fastdfs_client-5.0.8/fastdfs_client.spec.in ~/rpmbuild/SPECS/fastdfs_client.spec
cp -f libfastcommon-1.0.27/libfastcommon.spec ~/rpmbuild/SPECS/libfastcommon.spec
cd ~
rpmbuild -bb ~/rpmbuild/SPECS/libfastcommon.spec
rpm -ivh --replacepkgs --replacefiles ~/rpmbuild/RPMS/x86_64/libfastcommon*
rpmbuild -bb ~/rpmbuild/SPECS/fastdfs.spec
rpm -ivh --replacepkgs --replacefiles ~/rpmbuild/RPMS/x86_64/libfdfsclient*
rpmbuild -bb ~/rpmbuild/SPECS/fastdfs_client.spec
ls ~/rpmbuild/RPMS/x86_64

459
doc/zh/fastdfs/protocol.rst Normal file
View File

@ -0,0 +1,459 @@
Protocol
=======================
The communication protocol of FastDFS is TCP/IP; each package is composed of a header and a body, and the body may be empty.
header format
-----------------------
.. code-block:: ini
@ TRACKER_PROTO_PKG_LEN_SIZE bytes package length
@ 1 byte command
@ 1 byte status
.. note::
# TRACKER_PROTO_PKG_LEN_SIZE (8) bytes number buff is Big-Endian bytes
body format
-----------------------
1. common command
^^^^^^^^^^^^^^^^^^^^^^^
* FDFS_PROTO_CMD_QUIT
::
# function: notify server connection will be closed
# request body: none (no body part)
# response: none (no header and no body)
* FDFS_PROTO_CMD_ACTIVE_TEST
::
# function: active test
# request body: none
# response body: none
2. storage server to tracker server command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* the response command is TRACKER_PROTO_CMD_STORAGE_RESP
* TRACKER_PROTO_CMD_STORAGE_JOIN
::
# function: storage join to tracker
# request body:
@ FDFS_GROUP_NAME_MAX_LEN + 1 bytes: group name
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage http server port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: path count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: subdir count per path
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: upload priority
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: join time (join timestamp)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: up time (start timestamp)
@ FDFS_VERSION_SIZE bytes: storage server version
@ FDFS_DOMAIN_NAME_MAX_SIZE bytes: domain name of the web server on the storage server
@ 1 byte: init flag ( 1 for init done)
@ 1 byte: storage server status
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: tracker server count excluding current tracker
# response body:
@ FDFS_IPADDR_SIZE bytes: sync source storage server ip address
# memo: return all storage servers in the group only when storage servers changed or return none
* TRACKER_PROTO_CMD_STORAGE_BEAT
::
# function: heart beat
# request body: none or storage stat info
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total upload count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success upload count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total set metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success set metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total delete count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success delete count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total download count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success download count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total get metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success get metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total create link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success create link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total delete link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success delete link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last source update timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last sync update timestamp
@TRACKER_PROTO_PKG_LEN_SIZE bytes: last synced timestamp
@TRACKER_PROTO_PKG_LEN_SIZE bytes: last heart beat timestamp
# response body: n * (1 + FDFS_IPADDR_SIZE) bytes, n >= 0. One storage entry format:
@ 1 byte: storage server status
@ FDFS_IPADDR_SIZE bytes: storage server ip address
# memo: the storage server syncs its stat info to the tracker server only when its stat info has changed
* TRACKER_PROTO_CMD_STORAGE_REPORT
::
# function: report disk usage
# request body 1 or more than 1 following entries:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total space in MB
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: free space in MB
# response body: same to command TRACKER_PROTO_CMD_STORAGE_BEAT
* TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG
::
# function: report new storage servers of the group which may not yet exist in the tracker server
# request body: n * (1 + FDFS_IPADDR_SIZE) bytes, n >= 1. One storage entry format:
@ 1 byte: storage server status
@ FDFS_IPADDR_SIZE bytes: storage server ip address
# response body: none
* TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ
::
# function: source storage server sync request. When a new storage server is added, the existing storage servers in the same group ask the tracker server which source storage server should sync old data to the new one
# request body:
@ FDFS_GROUP_NAME_MAX_LEN: group name
@ FDFS_IPADDR_SIZE bytes: dest storage server (new storage server) ip address
# response body: none or
@ FDFS_IPADDR_SIZE bytes: source storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: sync until timestamp
# memo: if the dest storage server does not need to sync from one of the storage servers in the group, the response body is empty
* TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ
::
# function: dest storage server (new storage server) require sync
# request body: none
# response body: none or
@ FDFS_IPADDR_SIZE bytes: source storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: sync until timestamp
# memo: if the dest storage server does not need to sync from one of the storage servers in the group, the response body is empty
* TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY
::
# function: new storage server sync notify
# request body:
@ FDFS_IPADDR_SIZE bytes: source storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: sync until timestamp
# response body: same to command TRACKER_PROTO_CMD_STORAGE_BEAT
3. client to tracker server command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* the response command of the following 2 commands is TRACKER_PROTO_CMD_SERVER_RESP
* TRACKER_PROTO_CMD_SERVER_LIST_GROUP
::
# function: list all groups
# request body: none
# response body: n group entries, n >= 0, the format of each entry:
@ FDFS_GROUP_NAME_MAX_LEN+1 bytes: group name
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: free disk storage in MB
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server http port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: active server count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: current write server index
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: store path count on storage server
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: subdir count per path on storage server
* TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
::
# function: list storage servers of a group
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: the group name to query
# response body: n storage entries, n >= 0, the format of each entry:
@ 1 byte: status
@ FDFS_IPADDR_SIZE bytes: ip address
@ FDFS_DOMAIN_NAME_MAX_SIZE bytes : domain name of the web server
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: source storage server ip address
@ FDFS_VERSION_SIZE bytes: storage server version
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: join time (join in timestamp)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: up time (start timestamp)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total space in MB
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: free space in MB
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: upload priority
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: store path count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: subdir count per path
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: current write path
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage http port
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total upload count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success upload count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total set metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success set metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total delete count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success delete count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total download count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success download count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total get metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success get metadata count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total create link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success create link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: total delete link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: success delete link count
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last source update timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: last sync update timestamp
@TRACKER_PROTO_PKG_LEN_SIZE bytes: last synced timestamp
@TRACKER_PROTO_PKG_LEN_SIZE bytes: last heart beat timestamp
* the response command of the following commands is TRACKER_PROTO_CMD_SERVICE_RESP
* TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
::
# function: query which storage server to store file
# request body: none
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@1 byte: store path index on the storage server
* TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL
::
# function: query which storage server to store file
# request body: none
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address (* multi)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port (*multi)
@1 byte: store path index on the storage server
* TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
::
# function: query which storage server to store file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@1 byte: store path index on the storage server
* TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL
::
# function: query which storage server to store file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address (* multi)
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port (* multi)
@1 byte: store path index on the storage server
* TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH
::
# function: query which storage server to download the file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
* TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL
::
# function: query all storage servers to download the file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
@ n * (FDFS_IPADDR_SIZE - 1) bytes: storage server ip addresses, n can be 0
* TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE
::
# function: query which storage server to download the file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ FDFS_IPADDR_SIZE - 1 bytes: storage server ip address
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: storage server port
4. storage server to storage server command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* the response command is STORAGE_PROTO_CMD_RESP
* STORAGE_PROTO_CMD_SYNC_CREATE_FILE
::
# function: sync new created file
# request body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: filename bytes
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file size/bytes
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes : filename
@ file size bytes: file content
# response body: none
* STORAGE_PROTO_CMD_SYNC_DELETE_FILE
::
# function: sync deleted file
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body: none
* STORAGE_PROTO_CMD_SYNC_UPDATE_FILE
::
# function: sync updated file
# request body: same to command STORAGE_PROTO_CMD_SYNC_CREATE_FILE
# response body: none
5. client to storage server command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* the response command is STORAGE_PROTO_CMD_RESP
* STORAGE_PROTO_CMD_UPLOAD_FILE
::
# function: upload file to storage server
# request body:
@ 1 byte: store path index on the storage server
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file size
@ FDFS_FILE_EXT_NAME_MAX_LEN bytes: file ext name, do not include dot (.)
@ file size bytes: file content
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
* STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE
# function: upload slave file to storage server
# request body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: master filename length
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file size
@ FDFS_FILE_PREFIX_MAX_LEN bytes: filename prefix
@ FDFS_FILE_EXT_NAME_MAX_LEN bytes: file ext name, do not include dot (.)
@ master filename bytes: master filename
@ file size bytes: file content
# response body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
* STORAGE_PROTO_CMD_DELETE_FILE
::
# function: delete file from storage server
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body: none
* STORAGE_PROTO_CMD_SET_METADATA
::
# function: set metadata of the file on the storage server
# request body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: filename length
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: meta data size
@ 1 bytes: operation flag,
'O' for overwrite all old metadata
'M' for merge, insert when the meta item not exist, otherwise update it
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
@ meta data bytes: each meta data separated by \x01,
name and value separated by \x02
# response body: none
* STORAGE_PROTO_CMD_DOWNLOAD_FILE
::
# function: download/fetch file from storage server
# request body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file offset
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: download file bytes
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ file content
* STORAGE_PROTO_CMD_GET_METADATA
::
# function: get metadata from the storage server
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body
@ meta data buff, each meta data separated by \x01, name and value separated by \x02
* STORAGE_PROTO_CMD_QUERY_FILE_INFO
::
# function: query file info from storage server
# request body:
@ FDFS_GROUP_NAME_MAX_LEN bytes: group name
@ filename bytes: filename
# response body:
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file size
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file create timestamp
@ TRACKER_PROTO_PKG_LEN_SIZE bytes: file CRC32 signature

27
doc/zh/index.rst Normal file
View File

@ -0,0 +1,27 @@
.. FastDFS documentation master file, created by
sphinx-quickstart on Wed May 4 10:58:17 2016.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to FastDFS's documentation!
===================================
Contents:
.. toctree::
:maxdepth: 2
fastdfs/introduction
fastdfs/install
fastdfs/howto
fastdfs/protocol
fastdfs/benchmark
fastdfs/faq
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

263
doc/zh/make.bat Normal file
View File

@ -0,0 +1,263 @@
@ECHO OFF
REM Command file for Sphinx documentation.
REM Usage: make.bat <target>   (run "make.bat help" for the list of targets)

REM Allow the caller to override the sphinx-build executable via the
REM SPHINXBUILD environment variable.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
REM The i18n builder cannot share the environment and doctrees with the others.
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)

if "%1" == "" goto help

if "%1" == "help" (
	:help
	echo.Please use `make ^<target^>` where ^<target^> is one of
	echo.  html       to make standalone HTML files
	echo.  dirhtml    to make HTML files named index.html in directories
	echo.  singlehtml to make a single large HTML file
	echo.  pickle     to make pickle files
	echo.  json       to make JSON files
	echo.  htmlhelp   to make HTML files and a HTML help project
	echo.  qthelp     to make HTML files and a qthelp project
	echo.  devhelp    to make HTML files and a Devhelp project
	echo.  epub       to make an epub
	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
	echo.  text       to make text files
	echo.  man        to make manual pages
	echo.  texinfo    to make Texinfo files
	echo.  gettext    to make PO message catalogs
	echo.  changes    to make an overview over all changed/added/deprecated items
	echo.  xml        to make Docutils-native XML files
	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
	echo.  linkcheck  to check all external links for integrity
	echo.  doctest    to run all doctests embedded in the documentation if enabled
	echo.  coverage   to run coverage check of the documentation if enabled
	goto end
)

if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)

REM Check if sphinx-build is available; fall back to the Python module if not.
REM Exit code 9009 is cmd's "command not found".
%SPHINXBUILD% 2> nul
if errorlevel 9009 goto sphinx_python
goto sphinx_ok

:sphinx_python

set SPHINXBUILD=python -m sphinx.__init__
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

:sphinx_ok

REM One block per Sphinx builder; each runs the builder, aborts on failure,
REM and reports where the output landed.
if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "singlehtml" (
	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\FastDFS.qhcp
	echo.To view the help file:
	REM Qt Assistant collection files use the .qhc extension (fixed from .ghc).
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\FastDFS.qhc
	goto end
)

if "%1" == "devhelp" (
	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished.
	goto end
)

if "%1" == "epub" (
	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The epub file is in %BUILDDIR%/epub.
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdf" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "latexpdfja" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	cd %BUILDDIR%/latex
	make all-pdf-ja
	cd %~dp0
	echo.
	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "text" (
	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The text files are in %BUILDDIR%/text.
	goto end
)

if "%1" == "man" (
	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The manual pages are in %BUILDDIR%/man.
	goto end
)

if "%1" == "texinfo" (
	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
	goto end
)

if "%1" == "gettext" (
	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	if errorlevel 1 exit /b 1
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	if errorlevel 1 exit /b 1
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

if "%1" == "coverage" (
	%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
	if errorlevel 1 exit /b 1
	echo.
	echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
	goto end
)

if "%1" == "xml" (
	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The XML files are in %BUILDDIR%/xml.
	goto end
)

if "%1" == "pseudoxml" (
	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
	if errorlevel 1 exit /b 1
	echo.
	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
	goto end
)

:end