Adds ginkgo / gomega as a dependency
author Zachary Gershman <zgershman@pivotal.io>
Thu, 11 Feb 2016 00:30:23 +0000 (16:30 -0800)
committer zachgersh <zachgersh@gmail.com>
Mon, 29 Feb 2016 17:27:11 +0000 (12:27 -0500)
150 files changed:
Godeps/Godeps.json
Godeps/_workspace/src/github.com/onsi/ginkgo/.gitignore [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/LICENSE [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/README.md [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/build_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/import.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/generate_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/help_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/main.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/notifications.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/suite_runner.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/version_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/suite.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/integration/integration.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/codelocation/code_location.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/containernode/container_node.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/failer/failer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/it_node.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/runner.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/aggregator.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/server.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/index_computer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/spec.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/specs.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/specrunner/random_id.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/suite/suite.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/writer/fake_writer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/writer/writer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/default_reporter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/fake_reporter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/junit_reporter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/reporter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/teamcity_reporter.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/types/code_location.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/types/synchronization.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/ginkgo/types/types.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/.gitignore [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/.travis.yml [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/CHANGELOG.md [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/LICENSE [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/README.md [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/format/format.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/gbytes/buffer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/gbytes/say_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/gexec/build.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/gexec/exit_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/gexec/prefixed_writer.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/gexec/session.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/handlers.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/protobuf.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/test_server.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/gomega_dsl.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/internal/assertion/assertion.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/and.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_a_directory.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_a_regular_file.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_an_existing_file.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_closed_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_empty_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_false_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_nil_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_numerically_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_sent_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_temporally_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_true_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_zero_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/consist_of.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/contain_element_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/contain_substring_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/equal_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_key_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_len_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_occurred_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_prefix_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_suffix_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_error_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_json_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_regexp_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/not.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/or.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/panic_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/receive_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/succeed_matcher.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/node/node.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/util/util.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/type_support.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/matchers/with_transform.go [new file with mode: 0644]
Godeps/_workspace/src/github.com/onsi/gomega/types/types.go [new file with mode: 0644]
test

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index df42da8..ae04ed0 100644 (file)
@@ -1,6 +1,6 @@
 {
        "ImportPath": "github.com/appc/cni",
-       "GoVersion": "go1.4.2",
+       "GoVersion": "go1.5.3",
        "Packages": [
                "./..."
        ],
                        "ImportPath": "github.com/d2g/dhcp4client",
                        "Rev": "bed07e1bc5b85f69c6f0fd73393aa35ec68ed892"
                },
+               {
+                       "ImportPath": "github.com/onsi/ginkgo",
+                       "Comment": "v1.2.0-29-g7f8ab55",
+                       "Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
+               },
+               {
+                       "ImportPath": "github.com/onsi/gomega",
+                       "Comment": "v1.0-71-g2152b45",
+                       "Rev": "2152b45fa28a361beba9aab0885972323a444e28"
+               },
                {
                        "ImportPath": "github.com/vishvananda/netlink",
                        "Rev": "ecf47fd5739b3d2c3daf7c89c4b9715a2605c21b"
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/.gitignore b/Godeps/_workspace/src/github.com/onsi/ginkgo/.gitignore
new file mode 100644 (file)
index 0000000..922b4f7
--- /dev/null
@@ -0,0 +1,4 @@
+.DS_Store
+TODO
+tmp/**/*
+*.coverprofile
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml b/Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml
new file mode 100644 (file)
index 0000000..f0e67b8
--- /dev/null
@@ -0,0 +1,15 @@
+language: go
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+
+install:
+  - go get -v -t ./...
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/onsi/gomega
+  - go install github.com/onsi/ginkgo/ginkgo
+  - export PATH=$PATH:$HOME/gopath/bin
+
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md b/Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md
new file mode 100644 (file)
index 0000000..438c8c1
--- /dev/null
@@ -0,0 +1,136 @@
+## HEAD
+
+Improvements:
+
+- `Skip(message)` can be used to skip the current test.
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
+
+Bug Fixes:
+
+- Ginkgo tests now fail when you `panic(nil)` (#167)
+
+## 1.2.0 5/31/2015
+
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
+- Relaxed requirement for Go 1.4+.  `ginkgo` now works with Go v1.3+ (#166)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
+Improvements:
+
+- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
+- Improved focus behavior.  Now, this:
+
+    ```golang
+    FDescribe("Some describe", func() {
+        It("A", func() {})
+
+        FIt("B", func() {})
+    })
+    ```
+
+  will run `B` but *not* `A`.  This tends to be a common usage pattern when in the thick of writing and debugging tests.
+- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`.  Useful for debugging stuck tests.
+- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`.  This is useful for debugging stuck tests and tests that generate many logs.
+- Improved output when an error occurs in a setup or teardown block.
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything.  Best paired with `-v` to understand which specs will run in which order.
+- Add `By` to help document long `It`s.  `By` simply writes to the `GinkgoWriter`.
+- Add support for precompiled tests:
+    - `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
+    - The compiled `package.test` file can be run directly.  This runs the tests in series.
+    - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
+- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
+- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
+- The `ginkgo` CLI ignores `SIGQUIT`.  Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory.  This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
+
+Bug Fixes:
+
+- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
+- Fix tempfile leak when running in parallel
+- Fix incorrect failure message when a panic occurs during a parallel test run
+- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes.  The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass.  This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings.  If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node.  This is always a mistake.  Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path.  Allows ⌘-clicking in iTerm to open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone.  Use `GinkgoT()` instead
+- Modified the Reporter interface 
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports.  This explicitly imports all exported identifiers in Ginkgo and Gomega.  Refreshing this list can be done by running `ginkgo nodot`
+- Additional arguments can be passed to specs.  Pass them after the `--` separator
+- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
+- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Ginkgo's internal to `internal`
+- Rename `example` everywhere to `spec`
+- Much more!
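The HEAD entries above introduce `Skip(message)` for skipping the current test and `By` for documenting long `It`s; neither is shown elsewhere in this commit. A minimal, hedged sketch of both in an ordinary spec; the helper `networkAvailable` is hypothetical:

```go
package example_test

import (
	. "github.com/onsi/ginkgo"
)

var _ = Describe("skipping and documenting specs", func() {
	It("skips itself when a precondition is missing", func() {
		By("checking an environment-specific precondition") // By writes to the GinkgoWriter
		if !networkAvailable() {
			Skip("no network available in this environment") // aborts this spec and marks it skipped
		}
		// ... assertions that need the network would go here ...
	})
})

// networkAvailable is a hypothetical helper used only for illustration.
func networkAvailable() bool { return false }
```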
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/LICENSE b/Godeps/_workspace/src/github.com/onsi/ginkgo/LICENSE
new file mode 100644 (file)
index 0000000..9415ee7
--- /dev/null
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/README.md b/Godeps/_workspace/src/github.com/onsi/ginkgo/README.md
new file mode 100644 (file)
index 0000000..b8b77b5
--- /dev/null
@@ -0,0 +1,115 @@
+![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
+
+[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo)
+
+Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more.  To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
+
+To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
+
+## Feature List
+
+- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests.  It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
+
+- Structure your BDD-style tests expressively:
+    - Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
+    - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
+    - [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
+    - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
+    - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
+
+- A comprehensive test runner that lets you:
+    - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
+    - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
+    - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
+    - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
+
+- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files.  Here are a few choice examples:
+    - `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in real time
+    - `ginkgo -cover` runs your tests using Golang's code coverage tool
+    - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
+    - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
+    - `ginkgo -r` runs all test suites under the current directory
+    - `ginkgo -v` prints out identifying information for each test just before it runs
+
+    And much more: run `ginkgo help` for details!
+
+    The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
+
+- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests.  Run tests immediately as you develop!
+
+- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
+
+- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code.  Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. 
+
+- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
+
+- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify).  Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
+
+- A modular architecture that lets you easily:
+    - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
+    - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
+
+## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
+
+Ginkgo is best paired with Gomega.  Learn more about Gomega [here](http://onsi.github.io/gomega/)
+
+## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
+
+Agouti allows you to run WebDriver integration tests.  Learn more about Agouti [here](http://agouti.org)
+
+## Set Me Up!
+
+You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
+
+```bash
+
+go get github.com/onsi/ginkgo/ginkgo  # installs the ginkgo CLI
+go get github.com/onsi/gomega         # fetches the matcher library
+
+cd path/to/package/you/want/to/test
+
+ginkgo bootstrap # set up a new ginkgo suite
+ginkgo generate  # will create a sample test file.  edit this file and add your tests then...
+
+go test # to run your tests
+
+ginkgo  # also runs your tests
+
+```
+
+## I'm new to Go: What are my testing options?
+
+Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega).  Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
+
+With that said, it's great to know what your options are :)
+
+### What Golang gives you out of the box
+
+Testing is a first-class citizen in Golang; however, Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
+
+### Matcher libraries for Golang's XUnit style tests
+
+A number of matcher libraries have been written to augment Go's built-in XUnit style tests.  Here are two that have gained traction:
+
+- [testify](https://github.com/stretchr/testify)
+- [gocheck](http://labix.org/gocheck)
+
+You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
+
+### BDD style testing frameworks
+
+There are a handful of BDD-style testing frameworks written for Golang.  Here are a few:
+
+- [Ginkgo](https://github.com/onsi/ginkgo) ;)
+- [GoConvey](https://github.com/smartystreets/goconvey) 
+- [Goblin](https://github.com/franela/goblin)
+- [Mao](https://github.com/azer/mao)
+- [Zen](https://github.com/pranavraja/zen)
+
+Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
+
+Go explore!
+
+## License
+
+Ginkgo is MIT-Licensed
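The README covers bootstrapping and generating a suite but does not show a spec file itself. A hedged sketch of what a Ginkgo/Gomega spec typically looks like under the default dot-import convention; the `Cart` type is invented for the example:

```go
package cart_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Cart is a hypothetical type defined here only to keep the example self-contained.
type Cart struct{ items []string }

func (c *Cart) Add(item string) { c.items = append(c.items, item) }

var _ = Describe("Cart", func() {
	var cart *Cart

	BeforeEach(func() {
		cart = &Cart{} // fresh state before every spec
	})

	It("starts empty", func() {
		Expect(cart.items).To(BeEmpty())
	})

	It("holds added items", func() {
		cart.Add("book")
		Expect(cart.items).To(HaveLen(1))
	})
})
```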
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go
new file mode 100644 (file)
index 0000000..46ce16a
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+Ginkgo accepts a number of configuration options.
+
+These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
+
+You can also learn more via
+
+       ginkgo help
+
+or (I kid you not):
+
+       go test -asdf
+*/
+package config
+
+import (
+       "flag"
+       "time"
+
+       "fmt"
+)
+
+const VERSION = "1.2.0"
+
+type GinkgoConfigType struct {
+       RandomSeed        int64
+       RandomizeAllSpecs bool
+       FocusString       string
+       SkipString        string
+       SkipMeasurements  bool
+       FailOnPending     bool
+       FailFast          bool
+       EmitSpecProgress  bool
+       DryRun            bool
+
+       ParallelNode  int
+       ParallelTotal int
+       SyncHost      string
+       StreamHost    string
+}
+
+var GinkgoConfig = GinkgoConfigType{}
+
+type DefaultReporterConfigType struct {
+       NoColor           bool
+       SlowSpecThreshold float64
+       NoisyPendings     bool
+       Succinct          bool
+       Verbose           bool
+       FullTrace         bool
+}
+
+var DefaultReporterConfig = DefaultReporterConfigType{}
+
+func processPrefix(prefix string) string {
+       if prefix != "" {
+               prefix = prefix + "."
+       }
+       return prefix
+}
+
+func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
+       prefix = processPrefix(prefix)
+       flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
+       flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together.  By default, ginkgo only randomizes the top level Describe/Context groups.")
+       flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
+       flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
+       flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
+       flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything.  Best paired with -v.")
+       flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
+       flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
+       flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
+
+       if includeParallelFlags {
+               flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number.  For running specs in parallel.")
+               flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes.  For running specs in parallel.")
+               flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
+               flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
+       }
+
+       flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
+       flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).")
+       flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
+       flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
+       flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
+       flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
+}
+
+func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
+       prefix = processPrefix(prefix)
+       result := make([]string, 0)
+
+       if ginkgo.RandomSeed > 0 {
+               result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
+       }
+
+       if ginkgo.RandomizeAllSpecs {
+               result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
+       }
+
+       if ginkgo.SkipMeasurements {
+               result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
+       }
+
+       if ginkgo.FailOnPending {
+               result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
+       }
+
+       if ginkgo.FailFast {
+               result = append(result, fmt.Sprintf("--%sfailFast", prefix))
+       }
+
+       if ginkgo.DryRun {
+               result = append(result, fmt.Sprintf("--%sdryRun", prefix))
+       }
+
+       if ginkgo.FocusString != "" {
+               result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
+       }
+
+       if ginkgo.SkipString != "" {
+               result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
+       }
+
+       if ginkgo.EmitSpecProgress {
+               result = append(result, fmt.Sprintf("--%sprogress", prefix))
+       }
+
+       if ginkgo.ParallelNode != 0 {
+               result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
+       }
+
+       if ginkgo.ParallelTotal != 0 {
+               result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
+       }
+
+       if ginkgo.StreamHost != "" {
+               result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
+       }
+
+       if ginkgo.SyncHost != "" {
+               result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
+       }
+
+       if reporter.NoColor {
+               result = append(result, fmt.Sprintf("--%snoColor", prefix))
+       }
+
+       if reporter.SlowSpecThreshold > 0 {
+               result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
+       }
+
+       if !reporter.NoisyPendings {
+               result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
+       }
+
+       if reporter.Verbose {
+               result = append(result, fmt.Sprintf("--%sv", prefix))
+       }
+
+       if reporter.Succinct {
+               result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
+       }
+
+       if reporter.FullTrace {
+               result = append(result, fmt.Sprintf("--%strace", prefix))
+       }
+
+       return result
+}
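`config.Flags` registers Ginkgo's flags on any `flag.FlagSet`, and `BuildFlagArgs` turns the populated config structs back into command-line arguments (this is how the CLI forwards its settings to compiled test binaries). A short sketch of that round trip, using only the functions defined above; the parsed flag values are illustrative:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	// Register Ginkgo's flags under the conventional "ginkgo." prefix,
	// including the parallel-run flags.
	flagSet := flag.NewFlagSet("example", flag.ExitOnError)
	config.Flags(flagSet, "ginkgo", true)

	// Parsing fills in config.GinkgoConfig and config.DefaultReporterConfig.
	flagSet.Parse([]string{"--ginkgo.focus=integration", "--ginkgo.v"})

	// Rebuild the argument list, e.g. to pass it on to a compiled package.test.
	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	fmt.Println(args)
}
```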
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go
new file mode 100644 (file)
index 0000000..ae8ab7d
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+
+Table provides a simple DSL for Ginkgo-native Table-Driven Tests
+
+The godoc documentation describes Table's API.  More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests
+
+*/
+
+package table
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/ginkgo"
+)
+
+/*
+DescribeTable describes a table-driven test.
+
+For example:
+
+    DescribeTable("a simple table",
+        func(x int, y int, expected bool) {
+            Ω(x > y).Should(Equal(expected))
+        },
+        Entry("x > y", 1, 0, true),
+        Entry("x == y", 0, 0, false),
+        Entry("x < y", 0, 1, false),
+    )
+
+The first argument to `DescribeTable` is a string description.
+The second argument is a function that will be run for each table entry.  Your assertions go here - the function is equivalent to a Ginkgo It.
+The subsequent arguments must be of type `TableEntry`.  We recommend using the `Entry` convenience constructors.
+
+The `Entry` constructor takes a string description followed by an arbitrary set of parameters.  These parameters are passed into your function.
+
+Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`.  Each `Entry` is turned into an `It` within the `Describe`.
+
+It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run).
+
+Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry).  In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable.
+*/
+func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
+       describeTable(description, itBody, entries, false, false)
+       return true
+}
+
+/*
+You can focus a table with `FDescribeTable`.  This is equivalent to `FDescribe`.
+*/
+func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
+       describeTable(description, itBody, entries, false, true)
+       return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTable`.  This is equivalent to `PDescribe`.
+*/
+func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
+       describeTable(description, itBody, entries, true, false)
+       return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTable`.  This is equivalent to `XDescribe`.
+*/
+func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
+       describeTable(description, itBody, entries, true, false)
+       return true
+}
+
+func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
+       itBodyValue := reflect.ValueOf(itBody)
+       if itBodyValue.Kind() != reflect.Func {
+               panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
+       }
+
+       if pending {
+               ginkgo.PDescribe(description, func() {
+                       for _, entry := range entries {
+                               entry.generateIt(itBodyValue)
+                       }
+               })
+       } else if focused {
+               ginkgo.FDescribe(description, func() {
+                       for _, entry := range entries {
+                               entry.generateIt(itBodyValue)
+                       }
+               })
+       } else {
+               ginkgo.Describe(description, func() {
+                       for _, entry := range entries {
+                               entry.generateIt(itBodyValue)
+                       }
+               })
+       }
+}
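The doc comment above mentions focused and pending entries (`FEntry`, `PEntry`/`XEntry`) without showing them. A small, hedged example mixing them in one table; the package name is illustrative:

```go
package table_example_test

import (
	. "github.com/onsi/ginkgo/extensions/table"
	. "github.com/onsi/gomega"
)

var _ = DescribeTable("integer addition",
	func(a, b, expected int) {
		Expect(a + b).To(Equal(expected))
	},
	Entry("positive operands", 2, 3, 5),
	FEntry("runs exclusively while being debugged", 0, 0, 0), // focused, like FIt
	XEntry("not ready yet", 1, -1, 0),                        // pending, like XIt
)
```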
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go
new file mode 100644 (file)
index 0000000..a6a9e3c
--- /dev/null
@@ -0,0 +1,72 @@
+package table
+
+import (
+       "reflect"
+
+       "github.com/onsi/ginkgo"
+)
+
+/*
+TableEntry represents an entry in a table test.  You generally use the `Entry` constructor.
+*/
+type TableEntry struct {
+       Description string
+       Parameters  []interface{}
+       Pending     bool
+       Focused     bool
+}
+
+func (t TableEntry) generateIt(itBody reflect.Value) {
+       if t.Pending {
+               ginkgo.PIt(t.Description)
+               return
+       }
+
+       values := []reflect.Value{}
+       for _, param := range t.Parameters {
+               values = append(values, reflect.ValueOf(param))
+       }
+
+       body := func() {
+               itBody.Call(values)
+       }
+
+       if t.Focused {
+               ginkgo.FIt(t.Description, body)
+       } else {
+               ginkgo.It(t.Description, body)
+       }
+}
+
+/*
+Entry constructs a TableEntry.
+
+The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
+Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
+
+Each Entry ends up generating an individual Ginkgo It.
+*/
+func Entry(description string, parameters ...interface{}) TableEntry {
+       return TableEntry{description, parameters, false, false}
+}
+
+/*
+You can focus a particular entry with FEntry.  This is equivalent to FIt.
+*/
+func FEntry(description string, parameters ...interface{}) TableEntry {
+       return TableEntry{description, parameters, false, true}
+}
+
+/*
+You can mark a particular entry as pending with PEntry.  This is equivalent to PIt.
+*/
+func PEntry(description string, parameters ...interface{}) TableEntry {
+       return TableEntry{description, parameters, true, false}
+}
+
+/*
+You can mark a particular entry as pending with XEntry.  This is equivalent to XIt.
+*/
+func XEntry(description string, parameters ...interface{}) TableEntry {
+       return TableEntry{description, parameters, true, false}
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
new file mode 100644 (file)
index 0000000..d804fe0
--- /dev/null
@@ -0,0 +1,182 @@
+package main
+
+import (
+       "bytes"
+       "flag"
+       "fmt"
+       "os"
+       "path/filepath"
+       "strings"
+       "text/template"
+
+       "go/build"
+
+       "github.com/onsi/ginkgo/ginkgo/nodot"
+)
+
+func BuildBootstrapCommand() *Command {
+       var agouti, noDot bool
+       flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
+       flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
+       flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
+
+       return &Command{
+               Name:         "bootstrap",
+               FlagSet:      flagSet,
+               UsageCommand: "ginkgo bootstrap <FLAGS>",
+               Usage: []string{
+                       "Bootstrap a test suite for the current package",
+                       "Accepts the following flags:",
+               },
+               Command: func(args []string, additionalArgs []string) {
+                       generateBootstrap(agouti, noDot)
+               },
+       }
+}
+
+var bootstrapText = `package {{.Package}}_test
+
+import (
+       {{.GinkgoImport}}
+       {{.GomegaImport}}
+
+       "testing"
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+       RegisterFailHandler(Fail)
+       RunSpecs(t, "{{.FormattedName}} Suite")
+}
+`
+
+var agoutiBootstrapText = `package {{.Package}}_test
+
+import (
+       {{.GinkgoImport}}
+       {{.GomegaImport}}
+       "github.com/sclevine/agouti"
+
+       "testing"
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+       RegisterFailHandler(Fail)
+       RunSpecs(t, "{{.FormattedName}} Suite")
+}
+
+var agoutiDriver *agouti.WebDriver
+
+var _ = BeforeSuite(func() {
+       // Choose a WebDriver:
+
+       agoutiDriver = agouti.PhantomJS()
+       // agoutiDriver = agouti.Selenium()
+       // agoutiDriver = agouti.ChromeDriver()
+
+       Expect(agoutiDriver.Start()).To(Succeed())
+})
+
+var _ = AfterSuite(func() {
+       Expect(agoutiDriver.Stop()).To(Succeed())
+})
+`
+
+type bootstrapData struct {
+       Package       string
+       FormattedName string
+       GinkgoImport  string
+       GomegaImport  string
+}
+
+func getPackageAndFormattedName() (string, string, string) {
+       path, err := os.Getwd()
+       if err != nil {
+               complainAndQuit("Could not get current working directory: \n" + err.Error())
+       }
+
+       dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
+       dirName = strings.Replace(dirName, " ", "_", -1)
+
+       pkg, err := build.ImportDir(path, 0)
+       packageName := pkg.Name
+       if err != nil {
+               packageName = dirName
+       }
+
+       formattedName := prettifyPackageName(filepath.Base(path))
+       return packageName, dirName, formattedName
+}
+
+func prettifyPackageName(name string) string {
+       name = strings.Replace(name, "-", " ", -1)
+       name = strings.Replace(name, "_", " ", -1)
+       name = strings.Title(name)
+       name = strings.Replace(name, " ", "", -1)
+       return name
+}
+
+func fileExists(path string) bool {
+       _, err := os.Stat(path)
+       if err == nil {
+               return true
+       }
+       return false
+}
+
+func generateBootstrap(agouti bool, noDot bool) {
+       packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
+       data := bootstrapData{
+               Package:       packageName,
+               FormattedName: formattedName,
+               GinkgoImport:  `. "github.com/onsi/ginkgo"`,
+               GomegaImport:  `. "github.com/onsi/gomega"`,
+       }
+
+       if noDot {
+               data.GinkgoImport = `"github.com/onsi/ginkgo"`
+               data.GomegaImport = `"github.com/onsi/gomega"`
+       }
+
+       targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
+       if fileExists(targetFile) {
+               fmt.Printf("%s already exists.\n\n", targetFile)
+               os.Exit(1)
+       } else {
+               fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
+       }
+
+       f, err := os.Create(targetFile)
+       if err != nil {
+               complainAndQuit("Could not create file: " + err.Error())
+               panic(err.Error())
+       }
+       defer f.Close()
+
+       var templateText string
+       if agouti {
+               templateText = agoutiBootstrapText
+       } else {
+               templateText = bootstrapText
+       }
+
+       bootstrapTemplate, err := template.New("bootstrap").Parse(templateText)
+       if err != nil {
+               panic(err.Error())
+       }
+
+       buf := &bytes.Buffer{}
+       bootstrapTemplate.Execute(buf, data)
+
+       if noDot {
+               contents, err := nodot.ApplyNoDot(buf.Bytes())
+               if err != nil {
+                       complainAndQuit("Failed to import nodot declarations: " + err.Error())
+               }
+               fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
+               buf = bytes.NewBuffer(contents)
+       }
+
+       buf.WriteTo(f)
+
+       goFmt(targetFile)
+}
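For reference, the default (dot-import, non-agouti) template above renders roughly as follows for a hypothetical directory named `networking`, written to `networking_suite_test.go`:

```go
package networking_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"
)

func TestNetworking(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Networking Suite")
}
```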
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/build_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/build_command.go
new file mode 100644 (file)
index 0000000..bbba8a1
--- /dev/null
@@ -0,0 +1,68 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+       "path/filepath"
+
+       "github.com/onsi/ginkgo/ginkgo/interrupthandler"
+       "github.com/onsi/ginkgo/ginkgo/testrunner"
+)
+
+func BuildBuildCommand() *Command {
+       commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
+       interruptHandler := interrupthandler.NewInterruptHandler()
+       builder := &SpecBuilder{
+               commandFlags:     commandFlags,
+               interruptHandler: interruptHandler,
+       }
+
+       return &Command{
+               Name:         "build",
+               FlagSet:      commandFlags.FlagSet,
+               UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
+               Usage: []string{
+                       "Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
+                       "Accepts the following flags:",
+               },
+               Command: builder.BuildSpecs,
+       }
+}
+
+type SpecBuilder struct {
+       commandFlags     *RunWatchAndBuildCommandFlags
+       interruptHandler *interrupthandler.InterruptHandler
+}
+
+func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
+       r.commandFlags.computeNodes()
+
+       suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)
+
+       if len(suites) == 0 {
+               complainAndQuit("Found no test suites")
+       }
+
+       passed := true
+       for _, suite := range suites {
+               runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
+               fmt.Printf("Compiling %s...\n", suite.PackageName)
+
+               path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
+               err := runner.CompileTo(path)
+               if err != nil {
+                       fmt.Println(err.Error())
+                       passed = false
+               } else {
+                       fmt.Printf("    compiled %s.test\n", suite.PackageName)
+               }
+
+               runner.CleanUp()
+       }
+
+       if passed {
+               os.Exit(0)
+       }
+       os.Exit(1)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
new file mode 100644 (file)
index 0000000..02e2b3b
--- /dev/null
@@ -0,0 +1,123 @@
+package convert
+
+import (
+       "fmt"
+       "go/ast"
+       "strings"
+       "unicode"
+)
+
+/*
+ * Creates the ValueSpec node for a top-level `var _ = ...` declaration
+ */
+func createVarUnderscoreBlock() *ast.ValueSpec {
+       valueSpec := &ast.ValueSpec{}
+       object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
+       ident := &ast.Ident{Name: "_", Obj: object}
+       valueSpec.Names = append(valueSpec.Names, ident)
+       return valueSpec
+}
+
+/*
+ * Creates a Describe("Testing with ginkgo", func() { }) node
+ */
+func createDescribeBlock() *ast.CallExpr {
+       blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}
+
+       fieldList := &ast.FieldList{}
+       funcType := &ast.FuncType{Params: fieldList}
+       funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
+       basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
+       describeIdent := &ast.Ident{Name: "Describe"}
+       return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
+}
+
+/*
+ * Convenience function to return the name of the *testing.T param
+ * for a Test function that will be rewritten. This is useful because
+ * we will want to replace the usage of this named *testing.T inside the
+ * body of the function with a GinkgoT().
+ */
+func namedTestingTArg(node *ast.FuncDecl) string {
+       return node.Type.Params.List[0].Names[0].Name // *exhale*
+}
+
+/*
+ * Convenience function to return the block statement node for a Describe statement
+ */
+func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
+       var funcLit *ast.FuncLit
+       var found = false
+
+       for _, node := range desc.Args {
+               switch node := node.(type) {
+               case *ast.FuncLit:
+                       found = true
+                       funcLit = node
+                       break
+               }
+       }
+
+       if !found {
+               panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
+       }
+
+       return funcLit.Body
+}
+
+/* convenience function for creating an It("TestNameHere")
+ * with all the body of the test function inside the anonymous
+ * func passed to It()
+ */
+func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
+       blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
+       fieldList := &ast.FieldList{}
+       funcType := &ast.FuncType{Params: fieldList}
+       funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
+
+       testName := rewriteTestName(testFunc.Name.Name)
+       basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
+       itBlockIdent := &ast.Ident{Name: "It"}
+       callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
+       return &ast.ExprStmt{X: callExpr}
+}
+
+/*
+ * Rewrites test names to be human readable,
+ * eg: rewrites "TestSomethingAmazing" as "something amazing"
+ */
+func rewriteTestName(testName string) string {
+       nameComponents := []string{}
+       currentString := ""
+       indexOfTest := strings.Index(testName, "Test")
+       if indexOfTest != 0 {
+               return testName
+       }
+
+       testName = strings.Replace(testName, "Test", "", 1)
+       first, rest := testName[0], testName[1:]
+       testName = string(unicode.ToLower(rune(first))) + rest
+
+       for _, rune := range testName {
+               if unicode.IsUpper(rune) {
+                       nameComponents = append(nameComponents, currentString)
+                       currentString = string(unicode.ToLower(rune))
+               } else {
+                       currentString += string(rune)
+               }
+       }
+
+       return strings.Join(append(nameComponents, currentString), " ")
+}
+
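+/*
+ * Builds a GinkgoT() call expression positioned at the given identifier;
+ * used when replacing references to a named *testing.T param.
+ */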
+func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
+       return &ast.CallExpr{
+               Lparen: ident.NamePos + 1,
+               Rparen: ident.NamePos + 2,
+               Fun:    &ast.Ident{Name: "GinkgoT"},
+       }
+}
+
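+/*
+ * Returns an identifier for the GinkgoTInterface type, used when rewriting
+ * *testing.T params and struct fields.
+ */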
+func newGinkgoTInterface() *ast.Ident {
+       return &ast.Ident{Name: "GinkgoTInterface"}
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/import.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/import.go
new file mode 100644 (file)
index 0000000..e226196
--- /dev/null
@@ -0,0 +1,91 @@
+package convert
+
+import (
+       "errors"
+       "fmt"
+       "go/ast"
+)
+
+/*
+ * Given the root node of an AST, returns the node containing the
+ * import statements for the file.
+ */
+func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
+       for _, declaration := range rootNode.Decls {
+               decl, ok := declaration.(*ast.GenDecl)
+               if !ok || len(decl.Specs) == 0 {
+                       continue
+               }
+
+               _, ok = decl.Specs[0].(*ast.ImportSpec)
+               if ok {
+                       imports = decl
+                       return
+               }
+       }
+
+       err = errors.New(fmt.Sprintf("Could not find imports for root node:\n\t%#v\n", rootNode))
+       return
+}
+
+/*
+ * Removes "testing" import, if present
+ */
+func removeTestingImport(rootNode *ast.File) {
+       importDecl, err := importsForRootNode(rootNode)
+       if err != nil {
+               panic(err.Error())
+       }
+
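+       // note: assumes the "testing" import is present; if it were missing, index would stay 0 and the first import would be removed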
+       var index int
+       for i, importSpec := range importDecl.Specs {
+               importSpec := importSpec.(*ast.ImportSpec)
+               if importSpec.Path.Value == "\"testing\"" {
+                       index = i
+                       break
+               }
+       }
+
+       importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
+}
+
+/*
+ * Adds import statements for onsi/ginkgo, if missing
+ */
+func addGinkgoImports(rootNode *ast.File) {
+       importDecl, err := importsForRootNode(rootNode)
+       if err != nil {
+               panic(err.Error())
+       }
+
+       if len(importDecl.Specs) == 0 {
+               // TODO: might need to create an import decl here
+               panic("unimplemented : expected to find an imports block")
+       }
+
+       needsGinkgo := true
+       for _, importSpec := range importDecl.Specs {
+               importSpec, ok := importSpec.(*ast.ImportSpec)
+               if !ok {
+                       continue
+               }
+
+               if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
+                       needsGinkgo = false
+               }
+       }
+
+       if needsGinkgo {
+               importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
+       }
+}
+
+/*
+ * convenience function to create an import statement
+ */
+func createImport(name, path string) *ast.ImportSpec {
+       return &ast.ImportSpec{
+               Name: &ast.Ident{Name: name},
+               Path: &ast.BasicLit{Kind: 9, Value: path},
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
new file mode 100644 (file)
index 0000000..ed09c46
--- /dev/null
@@ -0,0 +1,127 @@
+package convert
+
+import (
+       "fmt"
+       "go/build"
+       "io/ioutil"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "regexp"
+)
+
+/*
+ * RewritePackage takes a name (eg: my-package/tools), finds its test files using
+ * Go's build package, and then rewrites them. A ginkgo test suite file will
+ * also be added for this package, and all of its child packages.
+ */
+func RewritePackage(packageName string) {
+       pkg, err := packageWithName(packageName)
+       if err != nil {
+               panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
+       }
+
+       for _, filename := range findTestsInPackage(pkg) {
+               rewriteTestsInFile(filename)
+       }
+       return
+}
+
+/*
+ * Given a package, findTestsInPackage reads the test files in the directory,
+ * and then recurses on each child package, returning a slice of all test files
+ * found in this process.
+ */
+func findTestsInPackage(pkg *build.Package) (testfiles []string) {
+       for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
+               testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
+       }
+
+       dirFiles, err := ioutil.ReadDir(pkg.Dir)
+       if err != nil {
+               panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
+       }
+
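+       // directories whose names start with "." or "_" (e.g. ".git", "_workspace") are skipped below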
+       re := regexp.MustCompile(`^[._]`)
+
+       for _, file := range dirFiles {
+               if !file.IsDir() {
+                       continue
+               }
+
+               if re.Match([]byte(file.Name())) {
+                       continue
+               }
+
+               packageName := filepath.Join(pkg.ImportPath, file.Name())
+               subPackage, err := packageWithName(packageName)
+               if err != nil {
+                       panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
+               }
+
+               testfiles = append(testfiles, findTestsInPackage(subPackage)...)
+       }
+
+       addGinkgoSuiteForPackage(pkg)
+       goFmtPackage(pkg)
+       return
+}
+
+/*
+ * Shells out to `ginkgo bootstrap` to create a test suite file
+ */
+func addGinkgoSuiteForPackage(pkg *build.Package) {
+       originalDir, err := os.Getwd()
+       if err != nil {
+               panic(err)
+       }
+
+       suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")
+
+       _, err = os.Stat(suite_test_file)
+       if err == nil {
+               return // test file already exists, this should be a no-op
+       }
+
+       err = os.Chdir(pkg.Dir)
+       if err != nil {
+               panic(err)
+       }
+
+       output, err := exec.Command("ginkgo", "bootstrap").Output()
+
+       if err != nil {
+               panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
+       }
+
+       err = os.Chdir(originalDir)
+       if err != nil {
+               panic(err)
+       }
+}
+
+/*
+ * Shells out to `go fmt` to format the package
+ */
+func goFmtPackage(pkg *build.Package) {
+       output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
+
+       if err != nil {
+               fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
+       }
+}
+
+/*
+ * Attempts to return a package with its test files already read.
+ * The ImportMode arg to build.Import lets you specify if you want go to read the
+ * buildable go files inside the package, but it fails if the package has no go files
+ */
+func packageWithName(name string) (pkg *build.Package, err error) {
+       pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
+       if err == nil {
+               return
+       }
+
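+       // fall back to ImportMode(1), i.e. build.FindOnly, so the package still resolves when it has no buildable Go files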
+       pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
+       return
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
new file mode 100644 (file)
index 0000000..b33595c
--- /dev/null
@@ -0,0 +1,56 @@
+package convert
+
+import (
+       "go/ast"
+       "regexp"
+)
+
+/*
+ * Given a root node, walks its top level statements and returns
+ * pointers to function nodes to rewrite as It statements.
+ * These functions, according to Go testing convention, must be named
+ * TestWithCamelCasedName and receive a single *testing.T argument.
+ */
+func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
+       testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")
+
+       ast.Inspect(rootNode, func(node ast.Node) bool {
+               if node == nil {
+                       return false
+               }
+
+               switch node := node.(type) {
+               case *ast.FuncDecl:
+                       matches := testNameRegexp.MatchString(node.Name.Name)
+
+                       if matches && receivesTestingT(node) {
+                               testsToRewrite = append(testsToRewrite, node)
+                       }
+               }
+
+               return true
+       })
+
+       return
+}
+
+/*
+ * convenience function that looks at args to a function and determines if its
+ * params include an argument of type  *testing.T
+ */
+func receivesTestingT(node *ast.FuncDecl) bool {
+       if len(node.Type.Params.List) != 1 {
+               return false
+       }
+
+       base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
+       if !ok {
+               return false
+       }
+
+       intermediate := base.X.(*ast.SelectorExpr)
+       isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
+       isTestingT := intermediate.Sel.Name == "T"
+
+       return isTestingPackage && isTestingT
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
new file mode 100644 (file)
index 0000000..4b001a7
--- /dev/null
@@ -0,0 +1,163 @@
+package convert
+
+import (
+       "bytes"
+       "fmt"
+       "go/ast"
+       "go/format"
+       "go/parser"
+       "go/token"
+       "io/ioutil"
+       "os"
+)
+
+/*
+ * Given a file path, rewrites any tests into the Ginkgo format.
+ * First, we parse the AST, and update the imports declaration.
+ * Then, we walk the first child elements in the file, returning tests to rewrite.
+ * A top level var _ declaration is added, with a single Describe call inside.
+ * Then the test functions to rewrite are inserted as It statements inside the Describe.
+ * Finally we walk the rest of the file, replacing other usages of *testing.T
+ * Once that is complete, we write the AST back out again to its file.
+ */
+func rewriteTestsInFile(pathToFile string) {
+       fileSet := token.NewFileSet()
+       rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0)
+       if err != nil {
+               panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
+       }
+
+       addGinkgoImports(rootNode)
+       removeTestingImport(rootNode)
+
+       varUnderscoreBlock := createVarUnderscoreBlock()
+       describeBlock := createDescribeBlock()
+       varUnderscoreBlock.Values = []ast.Expr{describeBlock}
+
+       for _, testFunc := range findTestFuncs(rootNode) {
+               rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
+       }
+
+       underscoreDecl := &ast.GenDecl{
+               Tok:    85, // 85 is the numeric value of token.VAR; magic numbers are needed to make this work
+               TokPos: 14, // this tricks Go into writing "var _ = Describe"
+               Specs:  []ast.Spec{varUnderscoreBlock},
+       }
+
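+       // the imports GenDecl is assumed to be the first declaration; the new var _ block is inserted directly after it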
+       imports := rootNode.Decls[0]
+       tail := rootNode.Decls[1:]
+       rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
+       rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
+       walkNodesInRootNodeReplacingTestingT(rootNode)
+
+       var buffer bytes.Buffer
+       if err = format.Node(&buffer, fileSet, rootNode); err != nil {
+               panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
+       }
+
+       fileInfo, err := os.Stat(pathToFile)
+       if err != nil {
+               panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
+       }
+
+       ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
+       return
+}
+
+/*
+ * Given a test func named TestDoesSomethingNeat, rewrites it as
+ * It("does something neat", func() { __test_body_here__ }) and adds it
+ * to the Describe's list of statements
+ */
+func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
+       var funcIndex int = -1
+       for index, child := range rootNode.Decls {
+               if child == testFunc {
+                       funcIndex = index
+                       break
+               }
+       }
+
+       if funcIndex < 0 {
+               panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
+       }
+
+       var block *ast.BlockStmt = blockStatementFromDescribe(describe)
+       block.List = append(block.List, createItStatementForTestFunc(testFunc))
+       replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
+
+       // remove the old test func from the root node's declarations
+       rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
+       return
+}
+
+/*
+ * walks nodes inside of a test func's statements and replaces the usage of
+ * its named *testing.T param with calls to GinkgoT()
+ */
+func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
+       ast.Inspect(statementsBlock, func(node ast.Node) bool {
+               if node == nil {
+                       return false
+               }
+
+               keyValueExpr, ok := node.(*ast.KeyValueExpr)
+               if ok {
+                       replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
+                       return true
+               }
+
+               funcLiteral, ok := node.(*ast.FuncLit)
+               if ok {
+                       replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
+                       return true
+               }
+
+               callExpr, ok := node.(*ast.CallExpr)
+               if !ok {
+                       return true
+               }
+               replaceTestingTsInArgsLists(callExpr, testingT)
+
+               funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
+               if ok {
+                       replaceTestingTsMethodCalls(funCall, testingT)
+               }
+
+               return true
+       })
+}
+
+/*
+ * Rewrites t.Fail() or any other *testing.T method call by replacing the receiver with GinkgoT().
+ * This function receives a selector expression (eg: t.Fail()) and
+ * the name of the *testing.T param from the function declaration. Rewrites the
+ * selector expression in place if the target was a *testing.T
+ */
+func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
+       ident, ok := selectorExpr.X.(*ast.Ident)
+       if !ok {
+               return
+       }
+
+       if ident.Name == testingT {
+               selectorExpr.X = newGinkgoTFromIdent(ident)
+       }
+}
+
+/*
+ * replaces usages of a named *testing.T param inside of a call expression
+ * with a new GinkgoT object
+ */
+func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
+       for index, arg := range callExpr.Args {
+               ident, ok := arg.(*ast.Ident)
+               if !ok {
+                       continue
+               }
+
+               if ident.Name == testingT {
+                       callExpr.Args[index] = newGinkgoTFromIdent(ident)
+               }
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
new file mode 100644 (file)
index 0000000..418cdc4
--- /dev/null
@@ -0,0 +1,130 @@
+package convert
+
+import (
+       "go/ast"
+)
+
+/*
+ * Rewrites any other top level funcs that receive a *testing.T param
+ */
+func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
+       for _, decl := range declarations {
+               decl, ok := decl.(*ast.FuncDecl)
+               if !ok {
+                       continue
+               }
+
+               for _, param := range decl.Type.Params.List {
+                       starExpr, ok := param.Type.(*ast.StarExpr)
+                       if !ok {
+                               continue
+                       }
+
+                       selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
+                       if !ok {
+                               continue
+                       }
+
+                       xIdent, ok := selectorExpr.X.(*ast.Ident)
+                       if !ok || xIdent.Name != "testing" {
+                               continue
+                       }
+
+                       if selectorExpr.Sel.Name != "T" {
+                               continue
+                       }
+
+                       param.Type = newGinkgoTInterface()
+               }
+       }
+}
+
+/*
+ * Walks all of the nodes in the file, replacing *testing.T in struct
+ * and func literal nodes. eg:
+ *   type foo struct { *testing.T }
+ *   var bar = func(t *testing.T) { }
+ */
+func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
+       ast.Inspect(rootNode, func(node ast.Node) bool {
+               if node == nil {
+                       return false
+               }
+
+               switch node := node.(type) {
+               case *ast.StructType:
+                       replaceTestingTsInStructType(node)
+               case *ast.FuncLit:
+                       replaceTypeDeclTestingTsInFuncLiteral(node)
+               }
+
+               return true
+       })
+}
+
+/*
+ * replaces named *testing.T inside a composite literal
+ */
+func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
+       ident, ok := kve.Value.(*ast.Ident)
+       if !ok {
+               return
+       }
+
+       if ident.Name == testingT {
+               kve.Value = newGinkgoTFromIdent(ident)
+       }
+}
+
+/*
+ * replaces *testing.T params in a func literal with GinkgoT
+ */
+func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
+       for _, arg := range functionLiteral.Type.Params.List {
+               starExpr, ok := arg.Type.(*ast.StarExpr)
+               if !ok {
+                       continue
+               }
+
+               selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
+               if !ok {
+                       continue
+               }
+
+               target, ok := selectorExpr.X.(*ast.Ident)
+               if !ok {
+                       continue
+               }
+
+               if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
+                       arg.Type = newGinkgoTInterface()
+               }
+       }
+}
+
+/*
+ * Replaces *testing.T types inside of a struct declaration with a GinkgoT
+ * eg: type foo struct { *testing.T }
+ */
+func replaceTestingTsInStructType(structType *ast.StructType) {
+       for _, field := range structType.Fields.List {
+               starExpr, ok := field.Type.(*ast.StarExpr)
+               if !ok {
+                       continue
+               }
+
+               selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
+               if !ok {
+                       continue
+               }
+
+               xIdent, ok := selectorExpr.X.(*ast.Ident)
+               if !ok {
+                       continue
+               }
+
+               if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
+                       field.Type = newGinkgoTInterface()
+               }
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert_command.go
new file mode 100644 (file)
index 0000000..89e60d3
--- /dev/null
@@ -0,0 +1,44 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "github.com/onsi/ginkgo/ginkgo/convert"
+       "os"
+)
+
+func BuildConvertCommand() *Command {
+       return &Command{
+               Name:         "convert",
+               FlagSet:      flag.NewFlagSet("convert", flag.ExitOnError),
+               UsageCommand: "ginkgo convert /path/to/package",
+               Usage: []string{
+                       "Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
+               },
+               Command: convertPackage,
+       }
+}
+
+func convertPackage(args []string, additionalArgs []string) {
+       if len(args) != 1 {
+               println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package"))
+               os.Exit(1)
+       }
+
+       defer func() {
+               err := recover()
+               if err != nil {
+                       switch err := err.(type) {
+                       case error:
+                               println(err.Error())
+                       case string:
+                               println(err)
+                       default:
+                               println(fmt.Sprintf("unexpected error: %#v", err))
+                       }
+                       os.Exit(1)
+               }
+       }()
+
+       convert.RewritePackage(args[0])
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/generate_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/generate_command.go
new file mode 100644 (file)
index 0000000..7dd3b4d
--- /dev/null
@@ -0,0 +1,164 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+       "path/filepath"
+       "strings"
+       "text/template"
+)
+
+func BuildGenerateCommand() *Command {
+       var agouti, noDot bool
+       flagSet := flag.NewFlagSet("generate", flag.ExitOnError)
+       flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests")
+       flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega")
+
+       return &Command{
+               Name:         "generate",
+               FlagSet:      flagSet,
+               UsageCommand: "ginkgo generate <filename(s)>",
+               Usage: []string{
+                       "Generate a test file named filename_test.go",
+                       "If the optional <filenames> argument is omitted, a file named after the package in the current directory will be created.",
+                       "Accepts the following flags:",
+               },
+               Command: func(args []string, additionalArgs []string) {
+                       generateSpec(args, agouti, noDot)
+               },
+       }
+}
+
+var specText = `package {{.Package}}_test
+
+import (
+       . "{{.PackageImportPath}}"
+
+       {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
+       {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
+)
+
+var _ = Describe("{{.Subject}}", func() {
+
+})
+`
+
+var agoutiSpecText = `package {{.Package}}_test
+
+import (
+       . "{{.PackageImportPath}}"
+
+       {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
+       {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
+       . "github.com/sclevine/agouti/matchers"
+       "github.com/sclevine/agouti"
+)
+
+var _ = Describe("{{.Subject}}", func() {
+       var page *agouti.Page
+
+       BeforeEach(func() {
+               var err error
+               page, err = agoutiDriver.NewPage()
+               Expect(err).NotTo(HaveOccurred())
+       })
+
+       AfterEach(func() {
+               Expect(page.Destroy()).To(Succeed())
+       })
+})
+`
+
+type specData struct {
+       Package           string
+       Subject           string
+       PackageImportPath string
+       IncludeImports    bool
+}
+
+func generateSpec(args []string, agouti, noDot bool) {
+       if len(args) == 0 {
+               err := generateSpecForSubject("", agouti, noDot)
+               if err != nil {
+                       fmt.Println(err.Error())
+                       fmt.Println("")
+                       os.Exit(1)
+               }
+               fmt.Println("")
+               return
+       }
+
+       var failed bool
+       for _, arg := range args {
+               err := generateSpecForSubject(arg, agouti, noDot)
+               if err != nil {
+                       failed = true
+                       fmt.Println(err.Error())
+               }
+       }
+       fmt.Println("")
+       if failed {
+               os.Exit(1)
+       }
+}
+
+func generateSpecForSubject(subject string, agouti, noDot bool) error {
+       packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
+       if subject != "" {
+               subject = strings.Split(subject, ".go")[0]
+               subject = strings.Split(subject, "_test")[0]
+               specFilePrefix = subject
+               formattedName = prettifyPackageName(subject)
+       }
+
+       data := specData{
+               Package:           packageName,
+               Subject:           formattedName,
+               PackageImportPath: getPackageImportPath(),
+               IncludeImports:    !noDot,
+       }
+
+       targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
+       if fileExists(targetFile) {
+               return fmt.Errorf("%s already exists.", targetFile)
+       } else {
+               fmt.Printf("Generating ginkgo test for %s in:\n  %s\n", data.Subject, targetFile)
+       }
+
+       f, err := os.Create(targetFile)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+
+       var templateText string
+       if agouti {
+               templateText = agoutiSpecText
+       } else {
+               templateText = specText
+       }
+
+       specTemplate, err := template.New("spec").Parse(templateText)
+       if err != nil {
+               return err
+       }
+
+       specTemplate.Execute(f, data)
+       goFmt(targetFile)
+       return nil
+}
+
+func getPackageImportPath() string {
+       workingDir, err := os.Getwd()
+       if err != nil {
+               panic(err.Error())
+       }
+       sep := string(filepath.Separator)
+       paths := strings.Split(workingDir, sep+"src"+sep)
+       if len(paths) == 1 {
+               fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
+               return "UNKNOWN_PACKAGE_PATH"
+       }
+       return filepath.ToSlash(paths[len(paths)-1])
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/help_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/help_command.go
new file mode 100644 (file)
index 0000000..a42d4f8
--- /dev/null
@@ -0,0 +1,31 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+)
+
+func BuildHelpCommand() *Command {
+       return &Command{
+               Name:         "help",
+               FlagSet:      flag.NewFlagSet("help", flag.ExitOnError),
+               UsageCommand: "ginkgo help <COMMAND>",
+               Usage: []string{
+                       "Print usage information.  If a command is passed in, print usage information just for that command.",
+               },
+               Command: printHelp,
+       }
+}
+
+func printHelp(args []string, additionalArgs []string) {
+       if len(args) == 0 {
+               usage()
+       } else {
+               command, found := commandMatching(args[0])
+               if !found {
+                       complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
+               }
+
+               usageForCommand(command, true)
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
new file mode 100644 (file)
index 0000000..c15db0b
--- /dev/null
@@ -0,0 +1,52 @@
+package interrupthandler
+
+import (
+       "os"
+       "os/signal"
+       "sync"
+       "syscall"
+)
+
+type InterruptHandler struct {
+       interruptCount int
+       lock           *sync.Mutex
+       C              chan bool
+}
+
+func NewInterruptHandler() *InterruptHandler {
+       h := &InterruptHandler{
+               lock: &sync.Mutex{},
+               C:    make(chan bool, 0),
+       }
+
+       go h.handleInterrupt()
+       SwallowSigQuit()
+
+       return h
+}
+
+func (h *InterruptHandler) WasInterrupted() bool {
+       h.lock.Lock()
+       defer h.lock.Unlock()
+
+       return h.interruptCount > 0
+}
+
+func (h *InterruptHandler) handleInterrupt() {
+       c := make(chan os.Signal, 1)
+       signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+
+       <-c
+       signal.Stop(c)
+
+       h.lock.Lock()
+       h.interruptCount++
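+       // the first interrupt closes C to signal a graceful shutdown; more than five interrupts force an immediate exit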
+       if h.interruptCount == 1 {
+               close(h.C)
+       } else if h.interruptCount > 5 {
+               os.Exit(1)
+       }
+       h.lock.Unlock()
+
+       go h.handleInterrupt()
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
new file mode 100644 (file)
index 0000000..14c9421
--- /dev/null
@@ -0,0 +1,14 @@
+// +build freebsd openbsd netbsd dragonfly darwin linux
+
+package interrupthandler
+
+import (
+       "os"
+       "os/signal"
+       "syscall"
+)
+
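+// Registering a channel for SIGQUIT disables Go's default behavior of dumping goroutine stacks and exiting; the channel is never read.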
+func SwallowSigQuit() {
+       c := make(chan os.Signal, 1024)
+       signal.Notify(c, syscall.SIGQUIT)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
new file mode 100644 (file)
index 0000000..7f4a50e
--- /dev/null
@@ -0,0 +1,7 @@
+// +build windows
+
+package interrupthandler
+
+func SwallowSigQuit() {
+       //noop
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/main.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/main.go
new file mode 100644 (file)
index 0000000..b031b80
--- /dev/null
@@ -0,0 +1,291 @@
+/*
+The Ginkgo CLI
+
+The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
+
+You can also learn more by running:
+
+       ginkgo help
+
+Here are some of the more commonly used commands:
+
+To install:
+
+       go install github.com/onsi/ginkgo/ginkgo
+
+To run tests:
+
+       ginkgo
+
+To run tests in all subdirectories:
+
+       ginkgo -r
+
+To run tests in particular packages:
+
+       ginkgo <flags> /path/to/package /path/to/another/package
+
+To pass arguments/flags to your tests:
+
+       ginkgo <flags> <packages> -- <pass-throughs>
+
+To run tests in parallel
+
+       ginkgo -p
+
+this will automatically detect the optimal number of nodes to use.  Alternatively, you can specify the number of nodes with:
+
+       ginkgo -nodes=N
+
+(note that you don't need to provide -p in this case).
+
+By default the Ginkgo CLI will spin up a server that the individual test processes send test output to.  The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
+An alternative is to have the parallel nodes run and stream interleaved output back.  This is useful for debugging, particularly in contexts where tests hang/fail to start.  To get this interleaved output:
+
+       ginkgo -nodes=N -stream=true
+
+On Windows, the default value for stream is true.
+
+By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails.  To have Ginkgo run subsequent test suites instead you can:
+
+       ginkgo -keepGoing
+
+To monitor packages and rerun tests when changes occur:
+
+       ginkgo watch <-r> </path/to/package>
+
+passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
+`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X; tests for packages
+that depend on X are not rerun.
+
+[OSX & Linux only] To receive (desktop) notifications when a test run completes:
+
+       ginkgo -notify
+
+this is particularly useful with `ginkgo watch`.  Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`
+
+Sometimes (to suss out race conditions/flaky tests, for example) you want to keep running a test suite until it fails.  You can do this with:
+
+       ginkgo -untilItFails
+
+To bootstrap a test suite:
+
+       ginkgo bootstrap
+
+To generate a test file:
+
+       ginkgo generate <test_file_name>
+
+To bootstrap/generate test files without using "." imports:
+
+       ginkgo bootstrap --nodot
+       ginkgo generate --nodot
+
+this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions.  When you pull to the latest Ginkgo/Gomega you'll want to run
+
+       ginkgo nodot
+
+to refresh this list and pull in any new identifiers.  In particular, this will pull in any new Gomega matchers that get added.
+
+To convert an existing XUnit style test suite to a Ginkgo-style test suite:
+
+       ginkgo convert .
+
+To unfocus tests:
+
+       ginkgo unfocus
+
+or
+
+       ginkgo blur
+
+To compile a test suite:
+
+       ginkgo build <path-to-package>
+
+will output an executable file named `package.test`.  This can be run directly or by invoking
+
+       ginkgo <path-to-package.test>
+
+To print out Ginkgo's version:
+
+       ginkgo version
+
+To get more help:
+
+       ginkgo help
+*/
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os"
+       "os/exec"
+       "strings"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+const greenColor = "\x1b[32m"
+const redColor = "\x1b[91m"
+const defaultStyle = "\x1b[0m"
+const lightGrayColor = "\x1b[37m"
+
+type Command struct {
+       Name                      string
+       AltName                   string
+       FlagSet                   *flag.FlagSet
+       Usage                     []string
+       UsageCommand              string
+       Command                   func(args []string, additionalArgs []string)
+       SuppressFlagDocumentation bool
+       FlagDocSubstitute         []string
+}
+
+func (c *Command) Matches(name string) bool {
+       return c.Name == name || (c.AltName != "" && c.AltName == name)
+}
+
+func (c *Command) Run(args []string, additionalArgs []string) {
+       c.FlagSet.Parse(args)
+       c.Command(c.FlagSet.Args(), additionalArgs)
+}
+
+var DefaultCommand *Command
+var Commands []*Command
+
+func init() {
+       DefaultCommand = BuildRunCommand()
+       Commands = append(Commands, BuildWatchCommand())
+       Commands = append(Commands, BuildBuildCommand())
+       Commands = append(Commands, BuildBootstrapCommand())
+       Commands = append(Commands, BuildGenerateCommand())
+       Commands = append(Commands, BuildNodotCommand())
+       Commands = append(Commands, BuildConvertCommand())
+       Commands = append(Commands, BuildUnfocusCommand())
+       Commands = append(Commands, BuildVersionCommand())
+       Commands = append(Commands, BuildHelpCommand())
+}
+
+func main() {
+       args := []string{}
+       additionalArgs := []string{}
+
+       foundDelimiter := false
+
+       for _, arg := range os.Args[1:] {
+               if !foundDelimiter {
+                       if arg == "--" {
+                               foundDelimiter = true
+                               continue
+                       }
+               }
+
+               if foundDelimiter {
+                       additionalArgs = append(additionalArgs, arg)
+               } else {
+                       args = append(args, arg)
+               }
+       }
+
+       if len(args) > 0 {
+               commandToRun, found := commandMatching(args[0])
+               if found {
+                       commandToRun.Run(args[1:], additionalArgs)
+                       return
+               }
+       }
+
+       DefaultCommand.Run(args, additionalArgs)
+}
+
+func commandMatching(name string) (*Command, bool) {
+       for _, command := range Commands {
+               if command.Matches(name) {
+                       return command, true
+               }
+       }
+       return nil, false
+}
+
+func usage() {
+       fmt.Fprintf(os.Stderr, "Ginkgo Version %s\n\n", config.VERSION)
+       usageForCommand(DefaultCommand, false)
+       for _, command := range Commands {
+               fmt.Fprintf(os.Stderr, "\n")
+               usageForCommand(command, false)
+       }
+}
+
+func usageForCommand(command *Command, longForm bool) {
+       fmt.Fprintf(os.Stderr, "%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
+       fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.Usage, "\n"))
+       if command.SuppressFlagDocumentation && !longForm {
+               fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.FlagDocSubstitute, "\n  "))
+       } else {
+               command.FlagSet.PrintDefaults()
+       }
+}
+
+func complainAndQuit(complaint string) {
+       fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
+       os.Exit(1)
+}
+
+func findSuites(args []string, recurse bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
+       suites := []testsuite.TestSuite{}
+
+       if len(args) > 0 {
+               for _, arg := range args {
+                       if allowPrecompiled {
+                               suite, err := testsuite.PrecompiledTestSuite(arg)
+                               if err == nil {
+                                       suites = append(suites, suite)
+                                       continue
+                               }
+                       }
+                       suites = append(suites, testsuite.SuitesInDir(arg, recurse)...)
+               }
+       } else {
+               suites = testsuite.SuitesInDir(".", recurse)
+       }
+
+       skippedPackages := []string{}
+       if skipPackage != "" {
+               skipFilters := strings.Split(skipPackage, ",")
+               filteredSuites := []testsuite.TestSuite{}
+               for _, suite := range suites {
+                       skip := false
+                       for _, skipFilter := range skipFilters {
+                               if strings.Contains(suite.Path, skipFilter) {
+                                       skip = true
+                                       break
+                               }
+                       }
+                       if skip {
+                               skippedPackages = append(skippedPackages, suite.Path)
+                       } else {
+                               filteredSuites = append(filteredSuites, suite)
+                       }
+               }
+               suites = filteredSuites
+       }
+
+       return suites, skippedPackages
+}
+
+func goFmt(path string) {
+       err := exec.Command("go", "fmt", path).Run()
+       if err != nil {
+               complainAndQuit("Could not fmt: " + err.Error())
+       }
+}
+
+func pluralizedWord(singular, plural string, count int) string {
+       if count == 1 {
+               return singular
+       }
+       return plural
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
new file mode 100644 (file)
index 0000000..3f7237c
--- /dev/null
@@ -0,0 +1,194 @@
+package nodot
+
+import (
+       "fmt"
+       "go/ast"
+       "go/build"
+       "go/parser"
+       "go/token"
+       "path/filepath"
+       "strings"
+)
+
+func ApplyNoDot(data []byte) ([]byte, error) {
+       sections, err := generateNodotSections()
+       if err != nil {
+               return nil, err
+       }
+
+       for _, section := range sections {
+               data = section.createOrUpdateIn(data)
+       }
+
+       return data, nil
+}
+
+type nodotSection struct {
+       name         string
+       pkg          string
+       declarations []string
+       types        []string
+}
+
+func (s nodotSection) createOrUpdateIn(data []byte) []byte {
+       renames := map[string]string{}
+
+       contents := string(data)
+
+       lines := strings.Split(contents, "\n")
+
+       comment := "// Declarations for " + s.name
+
+       newLines := []string{}
+       for _, line := range lines {
+               if line == comment {
+                       continue
+               }
+
+               words := strings.Split(line, " ")
+               lastWord := words[len(words)-1]
+
+               if s.containsDeclarationOrType(lastWord) {
+                       renames[lastWord] = words[1]
+                       continue
+               }
+
+               newLines = append(newLines, line)
+       }
+
+       if len(newLines[len(newLines)-1]) > 0 {
+               newLines = append(newLines, "")
+       }
+
+       newLines = append(newLines, comment)
+
+       for _, typ := range s.types {
+               name, ok := renames[s.prefix(typ)]
+               if !ok {
+                       name = typ
+               }
+               newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
+       }
+
+       for _, decl := range s.declarations {
+               name, ok := renames[s.prefix(decl)]
+               if !ok {
+                       name = decl
+               }
+               newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
+       }
+
+       newLines = append(newLines, "")
+
+       newContents := strings.Join(newLines, "\n")
+
+       return []byte(newContents)
+}
+
+func (s nodotSection) prefix(declOrType string) string {
+       return s.pkg + "." + declOrType
+}
+
+func (s nodotSection) containsDeclarationOrType(word string) bool {
+       for _, declaration := range s.declarations {
+               if s.prefix(declaration) == word {
+                       return true
+               }
+       }
+
+       for _, typ := range s.types {
+               if s.prefix(typ) == word {
+                       return true
+               }
+       }
+
+       return false
+}
+
+func generateNodotSections() ([]nodotSection, error) {
+       sections := []nodotSection{}
+
+       declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
+       if err != nil {
+               return nil, err
+       }
+       sections = append(sections, nodotSection{
+               name:         "Ginkgo DSL",
+               pkg:          "ginkgo",
+               declarations: declarations,
+               types:        []string{"Done", "Benchmarker"},
+       })
+
+       declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
+       if err != nil {
+               return nil, err
+       }
+       sections = append(sections, nodotSection{
+               name:         "Gomega DSL",
+               pkg:          "gomega",
+               declarations: declarations,
+       })
+
+       declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
+       if err != nil {
+               return nil, err
+       }
+       sections = append(sections, nodotSection{
+               name:         "Gomega Matchers",
+               pkg:          "gomega",
+               declarations: declarations,
+       })
+
+       return sections, nil
+}
+
+func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
+       pkg, err := build.Import(pkgPath, ".", 0)
+       if err != nil {
+               return []string{}, err
+       }
+
+       declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
+       if err != nil {
+               return []string{}, err
+       }
+
+       blacklistLookup := map[string]bool{}
+       for _, declaration := range blacklist {
+               blacklistLookup[declaration] = true
+       }
+
+       filteredDeclarations := []string{}
+       for _, declaration := range declarations {
+               if blacklistLookup[declaration] {
+                       continue
+               }
+               filteredDeclarations = append(filteredDeclarations, declaration)
+       }
+
+       return filteredDeclarations, nil
+}
+
+func getExportedDeclarationsForFile(path string) ([]string, error) {
+       fset := token.NewFileSet()
+       tree, err := parser.ParseFile(fset, path, nil, 0)
+       if err != nil {
+               return []string{}, err
+       }
+
+       declarations := []string{}
+       ast.FileExports(tree)
+       for _, decl := range tree.Decls {
+               switch x := decl.(type) {
+               case *ast.GenDecl:
+                       switch s := x.Specs[0].(type) {
+                       case *ast.ValueSpec:
+                               declarations = append(declarations, s.Names[0].Name)
+                       }
+               case *ast.FuncDecl:
+                       declarations = append(declarations, x.Name.Name)
+               }
+       }
+
+       return declarations, nil
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go
new file mode 100644 (file)
index 0000000..212235b
--- /dev/null
@@ -0,0 +1,76 @@
+package main
+
+import (
+       "bufio"
+       "flag"
+       "github.com/onsi/ginkgo/ginkgo/nodot"
+       "io/ioutil"
+       "os"
+       "path/filepath"
+       "regexp"
+)
+
+func BuildNodotCommand() *Command {
+       return &Command{
+               Name:         "nodot",
+               FlagSet:      flag.NewFlagSet("bootstrap", flag.ExitOnError),
+               UsageCommand: "ginkgo nodot",
+               Usage: []string{
+                       "Update the nodot declarations in your test suite",
+                       "Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
+                       "If you've renamed a declaration, that name will be honored and not overwritten.",
+               },
+               Command: updateNodot,
+       }
+}
+
+func updateNodot(args []string, additionalArgs []string) {
+       suiteFile, perm := findSuiteFile()
+
+       data, err := ioutil.ReadFile(suiteFile)
+       if err != nil {
+               complainAndQuit("Failed to update nodot declarations: " + err.Error())
+       }
+
+       content, err := nodot.ApplyNoDot(data)
+       if err != nil {
+               complainAndQuit("Failed to update nodot declarations: " + err.Error())
+       }
+       ioutil.WriteFile(suiteFile, content, perm)
+
+       goFmt(suiteFile)
+}
+
+func findSuiteFile() (string, os.FileMode) {
+       workingDir, err := os.Getwd()
+       if err != nil {
+               complainAndQuit("Could not find suite file for nodot: " + err.Error())
+       }
+
+       files, err := ioutil.ReadDir(workingDir)
+       if err != nil {
+               complainAndQuit("Could not find suite file for nodot: " + err.Error())
+       }
+
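+       // the suite/bootstrap file is identified by its call to one of Ginkgo's RunSpecs variants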
+       re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)
+
+       for _, file := range files {
+               if file.IsDir() {
+                       continue
+               }
+               path := filepath.Join(workingDir, file.Name())
+               f, err := os.Open(path)
+               if err != nil {
+                       complainAndQuit("Could not find suite file for nodot: " + err.Error())
+               }
+               defer f.Close()
+
+               if re.MatchReader(bufio.NewReader(f)) {
+                       return path, file.Mode()
+               }
+       }
+
+       complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that calls Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")
+
+       return "", 0
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/notifications.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/notifications.go
new file mode 100644 (file)
index 0000000..368d61f
--- /dev/null
@@ -0,0 +1,141 @@
+package main
+
+import (
+       "fmt"
+       "os"
+       "os/exec"
+       "regexp"
+       "runtime"
+       "strings"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+type Notifier struct {
+       commandFlags *RunWatchAndBuildCommandFlags
+}
+
+func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
+       return &Notifier{
+               commandFlags: commandFlags,
+       }
+}
+
+func (n *Notifier) VerifyNotificationsAreAvailable() {
+       if n.commandFlags.Notify {
+               onLinux := (runtime.GOOS == "linux")
+               onOSX := (runtime.GOOS == "darwin")
+               if onOSX {
+
+                       _, err := exec.LookPath("terminal-notifier")
+                       if err != nil {
+                               fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
+
+OSX:
+
+To remedy this:
+
+    brew install terminal-notifier
+
+To learn more about terminal-notifier:
+
+    https://github.com/alloy/terminal-notifier
+`)
+                               os.Exit(1)
+                       }
+
+               } else if onLinux {
+
+                       _, err := exec.LookPath("notify-send")
+                       if err != nil {
+                               fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
+
+Linux:
+
+Download and install notify-send for your distribution
+`)
+                               os.Exit(1)
+                       }
+
+               }
+       }
+}
+
+func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
+       if suitePassed {
+               n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
+       } else {
+               n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
+       }
+}
+
+func (n *Notifier) SendNotification(title string, subtitle string) {
+
+       if n.commandFlags.Notify {
+               onLinux := (runtime.GOOS == "linux")
+               onOSX := (runtime.GOOS == "darwin")
+
+               if onOSX {
+
+                       _, err := exec.LookPath("terminal-notifier")
+                       if err == nil {
+                               args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
+                               terminal := os.Getenv("TERM_PROGRAM")
+                               if terminal == "iTerm.app" {
+                                       args = append(args, "-activate", "com.googlecode.iterm2")
+                               } else if terminal == "Apple_Terminal" {
+                                       args = append(args, "-activate", "com.apple.Terminal")
+                               }
+
+                               exec.Command("terminal-notifier", args...).Run()
+                       }
+
+               } else if onLinux {
+
+                       _, err := exec.LookPath("notify-send")
+                       if err == nil {
+                               args := []string{"-a", "ginkgo", title, subtitle}
+                               exec.Command("notify-send", args...).Run()
+                       }
+
+               }
+       }
+}
+
+func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {
+
+       command := n.commandFlags.AfterSuiteHook
+       if command != "" {
+
+               // Allow for string replacement to pass input to the command
+               passed := "[FAIL]"
+               if suitePassed {
+                       passed = "[PASS]"
+               }
+               command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
+               command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)
+
+               // Must break command into parts
+               splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
+               parts := splitArgs.FindAllString(command, -1)
+
+               output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
+               if err != nil {
+                       fmt.Println("Post-suite command failed:")
+                       if config.DefaultReporterConfig.NoColor {
+                               fmt.Printf("\t%s\n", output)
+                       } else {
+                               fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
+                       }
+                       n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
+               } else {
+                       fmt.Println("Post-suite command succeeded:")
+                       if config.DefaultReporterConfig.NoColor {
+                               fmt.Printf("\t%s\n", output)
+                       } else {
+                               fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
+                       }
+               }
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_command.go
new file mode 100644 (file)
index 0000000..c5cf277
--- /dev/null
@@ -0,0 +1,192 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "math/rand"
+       "os"
+       "time"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/ginkgo/interrupthandler"
+       "github.com/onsi/ginkgo/ginkgo/testrunner"
+       "github.com/onsi/ginkgo/types"
+)
+
+func BuildRunCommand() *Command {
+       commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
+       notifier := NewNotifier(commandFlags)
+       interruptHandler := interrupthandler.NewInterruptHandler()
+       runner := &SpecRunner{
+               commandFlags:     commandFlags,
+               notifier:         notifier,
+               interruptHandler: interruptHandler,
+               suiteRunner:      NewSuiteRunner(notifier, interruptHandler),
+       }
+
+       return &Command{
+               Name:         "",
+               FlagSet:      commandFlags.FlagSet,
+               UsageCommand: "ginkgo <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
+               Usage: []string{
+                       "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank).",
+                       "Any arguments after -- will be passed to the test.",
+                       "Accepts the following flags:",
+               },
+               Command: runner.RunSpecs,
+       }
+}
+
+type SpecRunner struct {
+       commandFlags     *RunWatchAndBuildCommandFlags
+       notifier         *Notifier
+       interruptHandler *interrupthandler.InterruptHandler
+       suiteRunner      *SuiteRunner
+}
+
+func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
+       r.commandFlags.computeNodes()
+       r.notifier.VerifyNotificationsAreAvailable()
+
+       suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
+       if len(skippedPackages) > 0 {
+               fmt.Println("Will skip:")
+               for _, skippedPackage := range skippedPackages {
+                       fmt.Println("  " + skippedPackage)
+               }
+       }
+
+       if len(skippedPackages) > 0 && len(suites) == 0 {
+               fmt.Println("All tests skipped!  Exiting...")
+               os.Exit(0)
+       }
+
+       if len(suites) == 0 {
+               complainAndQuit("Found no test suites")
+       }
+
+       r.ComputeSuccinctMode(len(suites))
+
+       t := time.Now()
+
+       runners := []*testrunner.TestRunner{}
+       for _, suite := range suites {
+               runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
+       }
+
+       numSuites := 0
+       runResult := testrunner.PassingRunResult()
+       if r.commandFlags.UntilItFails {
+               iteration := 0
+               for {
+                       r.UpdateSeed()
+                       randomizedRunners := r.randomizeOrder(runners)
+                       runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
+                       iteration++
+
+                       if r.interruptHandler.WasInterrupted() {
+                               break
+                       }
+
+                       if runResult.Passed {
+                               fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
+                       } else {
+                               fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
+                               break
+                       }
+               }
+       } else {
+               randomizedRunners := r.randomizeOrder(runners)
+               runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
+       }
+
+       for _, runner := range runners {
+               runner.CleanUp()
+       }
+
+       fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))
+
+       if runResult.Passed {
+               if runResult.HasProgrammaticFocus {
+                       fmt.Printf("Test Suite Passed\n")
+                       fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
+                       os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+               } else {
+                       fmt.Printf("Test Suite Passed\n")
+                       os.Exit(0)
+               }
+       } else {
+               fmt.Printf("Test Suite Failed\n")
+               os.Exit(1)
+       }
+}
+
+func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
+       if config.DefaultReporterConfig.Verbose {
+               config.DefaultReporterConfig.Succinct = false
+               return
+       }
+
+       if numSuites == 1 {
+               return
+       }
+
+       if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
+               config.DefaultReporterConfig.Succinct = true
+       }
+}
+
+func (r *SpecRunner) UpdateSeed() {
+       if !r.commandFlags.wasSet("seed") {
+               config.GinkgoConfig.RandomSeed = time.Now().Unix()
+       }
+}
+
+func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
+       if !r.commandFlags.RandomizeSuites {
+               return runners
+       }
+
+       if len(runners) <= 1 {
+               return runners
+       }
+
+       randomizedRunners := make([]*testrunner.TestRunner, len(runners))
+       randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
+       permutation := randomizer.Perm(len(runners))
+       for i, j := range permutation {
+               randomizedRunners[i] = runners[j]
+       }
+       return randomizedRunners
+}
+
+func orcMessage(iteration int) string {
+       if iteration < 10 {
+               return ""
+       } else if iteration < 30 {
+               return []string{
+                       "If at first you succeed...",
+                       "...try, try again.",
+                       "Looking good!",
+                       "Still good...",
+                       "I think your tests are fine....",
+                       "Yep, still passing",
+                       "Here we go again...",
+                       "Even the gophers are getting bored",
+                       "Did you try -race?",
+                       "Maybe you should stop now?",
+                       "I'm getting tired...",
+                       "What if I just made you a sandwich?",
+                       "Hit ^C, hit ^C, please hit ^C",
+                       "Make it stop. Please!",
+                       "Come on!  Enough is enough!",
+                       "Dave, this conversation can serve no purpose anymore. Goodbye.",
+                       "Just what do you think you're doing, Dave? ",
+                       "I, Sisyphus",
+                       "Insanity: doing the same thing over and over again and expecting different results. -Einstein",
+                       "I guess Einstein never tried to churn butter",
+               }[iteration-10] + "\n"
+       } else {
+               return "No, seriously... you can probably stop now.\n"
+       }
+}
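
A minimal sketch (suite names and seed invented) of the seed-driven shuffling performed by randomizeOrder above: the permutation depends only on the seed, so a failing suite order can be reproduced by passing the same -seed again.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	suites := []string{"books", "authors", "store"}
	seed := int64(1454281213) // stands in for config.GinkgoConfig.RandomSeed

	randomizer := rand.New(rand.NewSource(seed))
	shuffled := make([]string, len(suites))
	for i, j := range randomizer.Perm(len(suites)) {
		shuffled[i] = suites[j]
	}
	fmt.Println(shuffled) // same order every time for the same seed
}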
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
new file mode 100644 (file)
index 0000000..e6bcf36
--- /dev/null
@@ -0,0 +1,121 @@
+package main
+
+import (
+       "flag"
+       "runtime"
+
+       "github.com/onsi/ginkgo/config"
+)
+
+type RunWatchAndBuildCommandFlags struct {
+       Recurse     bool
+       Race        bool
+       Cover       bool
+       CoverPkg    string
+       SkipPackage string
+       Tags        string
+
+       //for run and watch commands
+       NumCPU         int
+       NumCompilers   int
+       ParallelStream bool
+       Notify         bool
+       AfterSuiteHook string
+       AutoNodes      bool
+
+       //only for run command
+       KeepGoing       bool
+       UntilItFails    bool
+       RandomizeSuites bool
+
+       //only for watch command
+       Depth int
+
+       FlagSet *flag.FlagSet
+}
+
+const runMode = 1
+const watchMode = 2
+const buildMode = 3
+
+func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
+       c := &RunWatchAndBuildCommandFlags{
+               FlagSet: flagSet,
+       }
+       c.flags(runMode)
+       return c
+}
+
+func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
+       c := &RunWatchAndBuildCommandFlags{
+               FlagSet: flagSet,
+       }
+       c.flags(watchMode)
+       return c
+}
+
+func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
+       c := &RunWatchAndBuildCommandFlags{
+               FlagSet: flagSet,
+       }
+       c.flags(buildMode)
+       return c
+}
+
+func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
+       wasSet := false
+       c.FlagSet.Visit(func(f *flag.Flag) {
+               if f.Name == flagName {
+                       wasSet = true
+               }
+       })
+
+       return wasSet
+}
+
+func (c *RunWatchAndBuildCommandFlags) computeNodes() {
+       if c.wasSet("nodes") {
+               return
+       }
+       if c.AutoNodes {
+               switch n := runtime.NumCPU(); {
+               case n <= 4:
+                       c.NumCPU = n
+               default:
+                       c.NumCPU = n - 1
+               }
+       }
+}
+
+func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
+       onWindows := (runtime.GOOS == "windows")
+
+       c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
+       c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
+       c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
+       c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
+       c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped.  If any part of the package's path matches, that package is ignored.")
+       c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
+
+       if mode == runMode || mode == watchMode {
+               config.Flags(c.FlagSet, "", false)
+               c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
+               c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
+               c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
+               c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
+               if !onWindows {
+                       c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
+               }
+               c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
+       }
+
+       if mode == runMode {
+               c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
+               c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
+               c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
+       }
+
+       if mode == watchMode {
+               c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
+       }
+}
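
A small sketch of the -p auto-node heuristic implemented by computeNodes above: use every CPU when there are four or fewer, otherwise leave one core free.

package main

import (
	"fmt"
	"runtime"
)

func autoNodes() int {
	if n := runtime.NumCPU(); n <= 4 {
		return n
	}
	return runtime.NumCPU() - 1
}

func main() {
	fmt.Printf("ginkgo -p would run with %d parallel nodes on this machine\n", autoNodes())
}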
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/suite_runner.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/suite_runner.go
new file mode 100644 (file)
index 0000000..7d56e5f
--- /dev/null
@@ -0,0 +1,172 @@
+package main
+
+import (
+       "fmt"
+       "runtime"
+       "sync"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/ginkgo/interrupthandler"
+       "github.com/onsi/ginkgo/ginkgo/testrunner"
+       "github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+type compilationInput struct {
+       runner *testrunner.TestRunner
+       result chan compilationOutput
+}
+
+type compilationOutput struct {
+       runner *testrunner.TestRunner
+       err    error
+}
+
+type SuiteRunner struct {
+       notifier         *Notifier
+       interruptHandler *interrupthandler.InterruptHandler
+}
+
+func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
+       return &SuiteRunner{
+               notifier:         notifier,
+               interruptHandler: interruptHandler,
+       }
+}
+
+func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
+       //we return this to the consumer, it will return each runner in order as it compiles
+       compilationOutputs := make(chan compilationOutput, len(runners))
+
+       //an array of channels - the nth runner's compilation output is sent to the nth channel in this array
+       //we read from these channels in order to ensure we run the suites in order
+       orderedCompilationOutputs := []chan compilationOutput{}
+       for _ = range runners {
+               orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
+       }
+
+       //we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
+       //we prefill the channel then close it, this ensures we compile things in the correct order
+       workPool := make(chan compilationInput, len(runners))
+       for i, runner := range runners {
+               workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
+       }
+       close(workPool)
+
+       //pick a reasonable numCompilers
+       if numCompilers == 0 {
+               numCompilers = runtime.NumCPU()
+       }
+
+       //a WaitGroup to help us wait for all compilers to shut down
+       wg := &sync.WaitGroup{}
+       wg.Add(numCompilers)
+
+       //spin up the concurrent compilers
+       for i := 0; i < numCompilers; i++ {
+               go func() {
+                       defer wg.Done()
+                       for input := range workPool {
+                               if r.interruptHandler.WasInterrupted() {
+                                       return
+                               }
+
+                               if willCompile != nil {
+                                       willCompile(input.runner.Suite)
+                               }
+
+                               //We retry because Go sometimes steps on itself when multiple compiles happen in parallel.  This is ugly, but should help resolve flakiness...
+                               var err error
+                               retries := 0
+                               for retries <= 5 {
+                                       if r.interruptHandler.WasInterrupted() {
+                                               return
+                                       }
+                                       if err = input.runner.Compile(); err == nil {
+                                               break
+                                       }
+                                       retries++
+                               }
+
+                               input.result <- compilationOutput{input.runner, err}
+                       }
+               }()
+       }
+
+       //read from the compilation output channels *in order* and send them to the caller
+       //close the compilationOutputs channel to tell the caller we're done
+       go func() {
+               defer close(compilationOutputs)
+               for _, orderedCompilationOutput := range orderedCompilationOutputs {
+                       select {
+                       case compilationOutput := <-orderedCompilationOutput:
+                               compilationOutputs <- compilationOutput
+                       case <-r.interruptHandler.C:
+                               //interrupt detected, wait for the compilers to shut down then bail
+                               //this ensures we clean up after ourselves and don't leave any compilation processes running
+                               wg.Wait()
+                               return
+                       }
+               }
+       }()
+
+       return compilationOutputs
+}
+
+func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
+       runResult := testrunner.PassingRunResult()
+
+       compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
+
+       numSuitesThatRan := 0
+       suitesThatFailed := []testsuite.TestSuite{}
+       for compilationOutput := range compilationOutputs {
+               if compilationOutput.err != nil {
+                       fmt.Print(compilationOutput.err.Error())
+               }
+               numSuitesThatRan++
+               suiteRunResult := testrunner.FailingRunResult()
+               if compilationOutput.err == nil {
+                       suiteRunResult = compilationOutput.runner.Run()
+               }
+               r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
+               r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
+               runResult = runResult.Merge(suiteRunResult)
+               if !suiteRunResult.Passed {
+                       suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
+                       if !keepGoing {
+                               break
+                       }
+               }
+               if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
+                       fmt.Println("")
+               }
+       }
+
+       if keepGoing && !runResult.Passed {
+               r.listFailedSuites(suitesThatFailed)
+       }
+
+       return runResult, numSuitesThatRan
+}
+
+func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
+       fmt.Println("")
+       fmt.Println("There were failures detected in the following suites:")
+
+       maxPackageNameLength := 0
+       for _, suite := range suitesThatFailed {
+               if len(suite.PackageName) > maxPackageNameLength {
+                       maxPackageNameLength = len(suite.PackageName)
+               }
+       }
+
+       packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
+
+       for _, suite := range suitesThatFailed {
+               if config.DefaultReporterConfig.NoColor {
+                       fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
+               } else {
+                       fmt.Printf("\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
+               }
+       }
+}
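
A standalone sketch (all names invented, not ginkgo's API) of the concurrency pattern compileInParallel uses: a buffered channel is prefilled with work and closed so workers simply drain it, and each work item carries its own one-slot result channel so results can be read back in the original order even though workers finish out of order.

package main

import (
	"fmt"
	"sync"
	"time"
)

type job struct {
	id     int
	result chan string
}

func main() {
	const numWorkers = 3
	jobs := []job{}
	for i := 0; i < 5; i++ {
		jobs = append(jobs, job{id: i, result: make(chan string, 1)})
	}

	// Prefill and close the work pool; workers range over it until it is empty.
	pool := make(chan job, len(jobs))
	for _, j := range jobs {
		pool <- j
	}
	close(pool)

	wg := &sync.WaitGroup{}
	wg.Add(numWorkers)
	for w := 0; w < numWorkers; w++ {
		go func() {
			defer wg.Done()
			for j := range pool {
				time.Sleep(10 * time.Millisecond) // pretend to compile
				j.result <- fmt.Sprintf("job %d done", j.id)
			}
		}()
	}

	// Read the per-job channels in order, regardless of completion order.
	for _, j := range jobs {
		fmt.Println(<-j.result)
	}
	wg.Wait()
}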
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
new file mode 100644 (file)
index 0000000..a73a6e3
--- /dev/null
@@ -0,0 +1,52 @@
+package testrunner
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "log"
+       "strings"
+       "sync"
+)
+
+type logWriter struct {
+       buffer *bytes.Buffer
+       lock   *sync.Mutex
+       log    *log.Logger
+}
+
+func newLogWriter(target io.Writer, node int) *logWriter {
+       return &logWriter{
+               buffer: &bytes.Buffer{},
+               lock:   &sync.Mutex{},
+               log:    log.New(target, fmt.Sprintf("[%d] ", node), 0),
+       }
+}
+
+func (w *logWriter) Write(data []byte) (n int, err error) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       w.buffer.Write(data)
+       contents := w.buffer.String()
+
+       lines := strings.Split(contents, "\n")
+       for _, line := range lines[0 : len(lines)-1] {
+               w.log.Println(line)
+       }
+
+       w.buffer.Reset()
+       w.buffer.Write([]byte(lines[len(lines)-1]))
+       return len(data), nil
+}
+
+func (w *logWriter) Close() error {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       if w.buffer.Len() > 0 {
+               w.log.Println(w.buffer.String())
+       }
+
+       return nil
+}
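
A standalone toy (logWriter itself is unexported, so this is an illustration, not its API) showing the behaviour of the writer above: only completed lines are emitted, each with a node prefix, while the trailing partial line stays buffered until the next write completes it.

package main

import (
	"bytes"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	buffer := &bytes.Buffer{}
	nodeLog := log.New(os.Stdout, "[1] ", 0)

	for _, chunk := range []string{"=== RUN TestBooks\n--- PASS: Te", "stBooks (0.01s)\n"} {
		buffer.WriteString(chunk)
		lines := strings.Split(buffer.String(), "\n")
		for _, line := range lines[:len(lines)-1] {
			nodeLog.Println(line) // e.g. "[1] === RUN TestBooks"
		}
		// Keep the incomplete tail for the next write.
		buffer.Reset()
		buffer.WriteString(lines[len(lines)-1])
	}
	fmt.Println("left in buffer:", buffer.Len(), "bytes")
}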
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
new file mode 100644 (file)
index 0000000..5d472ac
--- /dev/null
@@ -0,0 +1,27 @@
+package testrunner
+
+type RunResult struct {
+       Passed               bool
+       HasProgrammaticFocus bool
+}
+
+func PassingRunResult() RunResult {
+       return RunResult{
+               Passed:               true,
+               HasProgrammaticFocus: false,
+       }
+}
+
+func FailingRunResult() RunResult {
+       return RunResult{
+               Passed:               false,
+               HasProgrammaticFocus: false,
+       }
+}
+
+func (r RunResult) Merge(o RunResult) RunResult {
+       return RunResult{
+               Passed:               r.Passed && o.Passed,
+               HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
new file mode 100644 (file)
index 0000000..a1e47ba
--- /dev/null
@@ -0,0 +1,460 @@
+package testrunner
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "regexp"
+       "strconv"
+       "strings"
+       "syscall"
+       "time"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/ginkgo/testsuite"
+       "github.com/onsi/ginkgo/internal/remote"
+       "github.com/onsi/ginkgo/reporters/stenographer"
+       "github.com/onsi/ginkgo/types"
+)
+
+type TestRunner struct {
+       Suite testsuite.TestSuite
+
+       compiled              bool
+       compilationTargetPath string
+
+       numCPU         int
+       parallelStream bool
+       race           bool
+       cover          bool
+       coverPkg       string
+       tags           string
+       additionalArgs []string
+}
+
+func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
+       runner := &TestRunner{
+               Suite:          suite,
+               numCPU:         numCPU,
+               parallelStream: parallelStream,
+               race:           race,
+               cover:          cover,
+               coverPkg:       coverPkg,
+               tags:           tags,
+               additionalArgs: additionalArgs,
+       }
+
+       if !suite.Precompiled {
+               dir, err := ioutil.TempDir("", "ginkgo")
+               if err != nil {
+                       panic(fmt.Sprintf("couldn't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
+               }
+               runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
+       }
+
+       return runner
+}
+
+func (t *TestRunner) Compile() error {
+       return t.CompileTo(t.compilationTargetPath)
+}
+
+func (t *TestRunner) CompileTo(path string) error {
+       if t.compiled {
+               return nil
+       }
+
+       if t.Suite.Precompiled {
+               return nil
+       }
+
+       args := []string{"test", "-c", "-i", "-o", path}
+       if t.race {
+               args = append(args, "-race")
+       }
+       if t.cover || t.coverPkg != "" {
+               args = append(args, "-cover", "-covermode=atomic")
+       }
+       if t.coverPkg != "" {
+               args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
+       }
+       if t.tags != "" {
+               args = append(args, fmt.Sprintf("-tags=%s", t.tags))
+       }
+
+       cmd := exec.Command("go", args...)
+
+       cmd.Dir = t.Suite.Path
+
+       output, err := cmd.CombinedOutput()
+
+       if err != nil {
+               fixedOutput := fixCompilationOutput(string(output), t.Suite.Path)
+               if len(output) > 0 {
+                       return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
+               }
+               return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
+       }
+
+       if fileExists(path) == false {
+               compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test")
+               if fileExists(compiledFile) {
+                       // seems like we are on an old go version that does not support the -o flag on go test
+                       // move the compiled test file to the desired location by hand
+                       err = os.Rename(compiledFile, path)
+                       if err != nil {
+                               // We cannot move the file, perhaps because the source and destination
+                               // are on different partitions. We can copy the file, however.
+                               err = copyFile(compiledFile, path)
+                               if err != nil {
+                                       return fmt.Errorf("Failed to copy compiled file: %s", err)
+                               }
+                       }
+               } else {
+                       return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
+               }
+       }
+
+       t.compiled = true
+
+       return nil
+}
+
+func fileExists(path string) bool {
+       _, err := os.Stat(path)
+       return err == nil || os.IsNotExist(err) == false
+}
+
+// copyFile copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all its contents will be replaced by the contents
+// of the source file.
+func copyFile(src, dst string) error {
+       srcInfo, err := os.Stat(src)
+       if err != nil {
+               return err
+       }
+       mode := srcInfo.Mode()
+
+       in, err := os.Open(src)
+       if err != nil {
+               return err
+       }
+
+       defer in.Close()
+
+       out, err := os.Create(dst)
+       if err != nil {
+               return err
+       }
+
+       defer func() {
+               closeErr := out.Close()
+               if err == nil {
+                       err = closeErr
+               }
+       }()
+
+       _, err = io.Copy(out, in)
+       if err != nil {
+               return err
+       }
+
+       err = out.Sync()
+       if err != nil {
+               return err
+       }
+
+       return out.Chmod(mode)
+}
+
+/*
+go test -c -i spits package.test out into the cwd; there's no way to change this.
+
+To make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package.
+
+Unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd.
+
+This makes it hard to reason about what failed, and also prevents iTerm's Cmd+click from working.
+
+fixCompilationOutput rewrites the output to fix the paths.
+*/
+func fixCompilationOutput(output string, relToPath string) string {
+       re := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`)
+       lines := strings.Split(output, "\n")
+       for i, line := range lines {
+               indices := re.FindStringSubmatchIndex(line)
+               if len(indices) == 0 {
+                       continue
+               }
+
+               path := line[indices[2]:indices[3]]
+               path = filepath.Join(relToPath, path)
+               lines[i] = path + line[indices[3]:]
+       }
+       return strings.Join(lines, "\n")
+}
+
+func (t *TestRunner) Run() RunResult {
+       if t.Suite.IsGinkgo {
+               if t.numCPU > 1 {
+                       if t.parallelStream {
+                               return t.runAndStreamParallelGinkgoSuite()
+                       } else {
+                               return t.runParallelGinkgoSuite()
+                       }
+               } else {
+                       return t.runSerialGinkgoSuite()
+               }
+       } else {
+               return t.runGoTestSuite()
+       }
+}
+
+func (t *TestRunner) CleanUp() {
+       if t.Suite.Precompiled {
+               return
+       }
+       os.RemoveAll(filepath.Dir(t.compilationTargetPath))
+}
+
+func (t *TestRunner) runSerialGinkgoSuite() RunResult {
+       ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
+       return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
+}
+
+func (t *TestRunner) runGoTestSuite() RunResult {
+       return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
+}
+
+func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
+       completions := make(chan RunResult)
+       writers := make([]*logWriter, t.numCPU)
+
+       server, err := remote.NewServer(t.numCPU)
+       if err != nil {
+               panic("Failed to start parallel spec server")
+       }
+
+       server.Start()
+       defer server.Close()
+
+       for cpu := 0; cpu < t.numCPU; cpu++ {
+               config.GinkgoConfig.ParallelNode = cpu + 1
+               config.GinkgoConfig.ParallelTotal = t.numCPU
+               config.GinkgoConfig.SyncHost = server.Address()
+
+               ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
+
+               writers[cpu] = newLogWriter(os.Stdout, cpu+1)
+
+               cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
+
+               server.RegisterAlive(cpu+1, func() bool {
+                       if cmd.ProcessState == nil {
+                               return true
+                       }
+                       return !cmd.ProcessState.Exited()
+               })
+
+               go t.run(cmd, completions)
+       }
+
+       res := PassingRunResult()
+
+       for cpu := 0; cpu < t.numCPU; cpu++ {
+               res = res.Merge(<-completions)
+       }
+
+       for _, writer := range writers {
+               writer.Close()
+       }
+
+       os.Stdout.Sync()
+
+       if t.cover || t.coverPkg != "" {
+               t.combineCoverprofiles()
+       }
+
+       return res
+}
+
+func (t *TestRunner) runParallelGinkgoSuite() RunResult {
+       result := make(chan bool)
+       completions := make(chan RunResult)
+       writers := make([]*logWriter, t.numCPU)
+       reports := make([]*bytes.Buffer, t.numCPU)
+
+       stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
+       aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)
+
+       server, err := remote.NewServer(t.numCPU)
+       if err != nil {
+               panic("Failed to start parallel spec server")
+       }
+       server.RegisterReporters(aggregator)
+       server.Start()
+       defer server.Close()
+
+       for cpu := 0; cpu < t.numCPU; cpu++ {
+               config.GinkgoConfig.ParallelNode = cpu + 1
+               config.GinkgoConfig.ParallelTotal = t.numCPU
+               config.GinkgoConfig.SyncHost = server.Address()
+               config.GinkgoConfig.StreamHost = server.Address()
+
+               ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
+
+               reports[cpu] = &bytes.Buffer{}
+               writers[cpu] = newLogWriter(reports[cpu], cpu+1)
+
+               cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
+
+               server.RegisterAlive(cpu+1, func() bool {
+                       if cmd.ProcessState == nil {
+                               return true
+                       }
+                       return !cmd.ProcessState.Exited()
+               })
+
+               go t.run(cmd, completions)
+       }
+
+       res := PassingRunResult()
+
+       for cpu := 0; cpu < t.numCPU; cpu++ {
+               res = res.Merge(<-completions)
+       }
+
+       //all test processes are done, at this point
+       //we should be able to wait for the aggregator to tell us that it's done
+
+       select {
+       case <-result:
+               fmt.Println("")
+       case <-time.After(time.Second):
+               //the aggregator never got back to us!  something must have gone wrong
+               fmt.Println(`
+        -------------------------------------------------------------------
+       |                                                                   |
+       |  Ginkgo timed out waiting for all parallel nodes to report back!  |
+       |                                                                   |
+        -------------------------------------------------------------------
+`)
+
+               os.Stdout.Sync()
+
+               for _, writer := range writers {
+                       writer.Close()
+               }
+
+               for _, report := range reports {
+                       fmt.Print(report.String())
+               }
+
+               os.Stdout.Sync()
+       }
+
+       if t.cover || t.coverPkg != "" {
+               t.combineCoverprofiles()
+       }
+
+       return res
+}
+
+func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
+       args := []string{"--test.timeout=24h"}
+       if t.cover || t.coverPkg != "" {
+               coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
+               if t.numCPU > 1 {
+                       coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
+               }
+               args = append(args, coverprofile)
+       }
+
+       args = append(args, ginkgoArgs...)
+       args = append(args, t.additionalArgs...)
+
+       path := t.compilationTargetPath
+       if t.Suite.Precompiled {
+               path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
+       }
+
+       cmd := exec.Command(path, args...)
+
+       cmd.Dir = t.Suite.Path
+       cmd.Stderr = stream
+       cmd.Stdout = stream
+
+       return cmd
+}
+
+func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
+       var res RunResult
+
+       defer func() {
+               if completions != nil {
+                       completions <- res
+               }
+       }()
+
+       err := cmd.Start()
+       if err != nil {
+               fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
+               return res
+       }
+
+       cmd.Wait()
+       exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+       res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+       res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+
+       return res
+}
+
+func (t *TestRunner) combineCoverprofiles() {
+       profiles := []string{}
+       for cpu := 1; cpu <= t.numCPU; cpu++ {
+               coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu)
+               coverFile = filepath.Join(t.Suite.Path, coverFile)
+               coverProfile, err := ioutil.ReadFile(coverFile)
+               os.Remove(coverFile)
+
+               if err == nil {
+                       profiles = append(profiles, string(coverProfile))
+               }
+       }
+
+       if len(profiles) != t.numCPU {
+               return
+       }
+
+       lines := map[string]int{}
+       lineOrder := []string{}
+       for i, coverProfile := range profiles {
+               for _, line := range strings.Split(string(coverProfile), "\n")[1:] {
+                       if len(line) == 0 {
+                               continue
+                       }
+                       components := strings.Split(line, " ")
+                       count, _ := strconv.Atoi(components[len(components)-1])
+                       prefix := strings.Join(components[0:len(components)-1], " ")
+                       lines[prefix] += count
+                       if i == 0 {
+                               lineOrder = append(lineOrder, prefix)
+                       }
+               }
+       }
+
+       output := []string{"mode: atomic"}
+       for _, line := range lineOrder {
+               output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
+       }
+       finalOutput := strings.Join(output, "\n")
+       ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666)
+}
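
A reduced sketch of what combineCoverprofiles does with per-node profiles: every node writes lines with the same "file:start,end numStmts" prefix, so merging is just summing the trailing hit count per prefix and re-emitting the lines in the order of the first profile under a single "mode: atomic" header. The input lines below are invented for illustration.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	profiles := [][]string{
		{"books/books.go:10.2,12.3 1 4", "books/books.go:14.2,15.3 1 0"},
		{"books/books.go:10.2,12.3 1 1", "books/books.go:14.2,15.3 1 2"},
	}

	counts := map[string]int{}
	order := []string{}
	for i, profile := range profiles {
		for _, line := range profile {
			fields := strings.Split(line, " ")
			count, _ := strconv.Atoi(fields[len(fields)-1])
			prefix := strings.Join(fields[:len(fields)-1], " ")
			counts[prefix] += count
			if i == 0 {
				order = append(order, prefix)
			}
		}
	}

	fmt.Println("mode: atomic")
	for _, prefix := range order {
		fmt.Printf("%s %d\n", prefix, counts[prefix])
	}
}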
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
new file mode 100644 (file)
index 0000000..cc7d2f4
--- /dev/null
@@ -0,0 +1,106 @@
+package testsuite
+
+import (
+       "errors"
+       "io/ioutil"
+       "os"
+       "path/filepath"
+       "regexp"
+       "strings"
+)
+
+type TestSuite struct {
+       Path        string
+       PackageName string
+       IsGinkgo    bool
+       Precompiled bool
+}
+
+func PrecompiledTestSuite(path string) (TestSuite, error) {
+       info, err := os.Stat(path)
+       if err != nil {
+               return TestSuite{}, err
+       }
+
+       if info.IsDir() {
+               return TestSuite{}, errors.New("this is a directory, not a file")
+       }
+
+       if filepath.Ext(path) != ".test" {
+               return TestSuite{}, errors.New("this is not a .test binary")
+       }
+
+       if info.Mode()&0111 == 0 {
+               return TestSuite{}, errors.New("this is not executable")
+       }
+
+       dir := relPath(filepath.Dir(path))
+       packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
+
+       return TestSuite{
+               Path:        dir,
+               PackageName: packageName,
+               IsGinkgo:    true,
+               Precompiled: true,
+       }, nil
+}
+
+func SuitesInDir(dir string, recurse bool) []TestSuite {
+       suites := []TestSuite{}
+       files, _ := ioutil.ReadDir(dir)
+       re := regexp.MustCompile(`_test\.go$`)
+       for _, file := range files {
+               if !file.IsDir() && re.Match([]byte(file.Name())) {
+                       suites = append(suites, New(dir, files))
+                       break
+               }
+       }
+
+       if recurse {
+               re = regexp.MustCompile(`^[._]`)
+               for _, file := range files {
+                       if file.IsDir() && !re.Match([]byte(file.Name())) {
+                               suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
+                       }
+               }
+       }
+
+       return suites
+}
+
+func relPath(dir string) string {
+       dir, _ = filepath.Abs(dir)
+       cwd, _ := os.Getwd()
+       dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
+       dir = "." + string(filepath.Separator) + dir
+       return dir
+}
+
+func New(dir string, files []os.FileInfo) TestSuite {
+       return TestSuite{
+               Path:        relPath(dir),
+               PackageName: packageNameForSuite(dir),
+               IsGinkgo:    filesHaveGinkgoSuite(dir, files),
+       }
+}
+
+func packageNameForSuite(dir string) string {
+       path, _ := filepath.Abs(dir)
+       return filepath.Base(path)
+}
+
+func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
+       reTestFile := regexp.MustCompile(`_test\.go$`)
+       reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)
+
+       for _, file := range files {
+               if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
+                       contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
+                       if reGinkgo.Match(contents) {
+                               return true
+                       }
+               }
+       }
+
+       return false
+}
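
A minimal sketch of the detection rule in filesHaveGinkgoSuite: a package counts as a Ginkgo suite if any _test.go file either declares `package ginkgo` or imports a path ending in /ginkgo". The file contents below are invented.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)

	contents := []byte(`package books_test

import . "github.com/onsi/ginkgo"
`)
	fmt.Println(reGinkgo.Match(contents)) // true
}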
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
new file mode 100644 (file)
index 0000000..683c3a9
--- /dev/null
@@ -0,0 +1,38 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "os/exec"
+)
+
+func BuildUnfocusCommand() *Command {
+       return &Command{
+               Name:         "unfocus",
+               AltName:      "blur",
+               FlagSet:      flag.NewFlagSet("unfocus", flag.ExitOnError),
+               UsageCommand: "ginkgo unfocus (or ginkgo blur)",
+               Usage: []string{
+                       "Recursively unfocuses any focused tests under the current directory",
+               },
+               Command: unfocusSpecs,
+       }
+}
+
+func unfocusSpecs([]string, []string) {
+       unfocus("Describe")
+       unfocus("Context")
+       unfocus("It")
+       unfocus("Measure")
+       unfocus("DescribeTable")
+       unfocus("Entry")
+}
+
+func unfocus(component string) {
+       fmt.Printf("Removing F%s...\n", component)
+       cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".")
+       out, _ := cmd.CombinedOutput()
+       if string(out) != "" {
+               println(string(out))
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/version_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/version_command.go
new file mode 100644 (file)
index 0000000..cdca3a3
--- /dev/null
@@ -0,0 +1,23 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "github.com/onsi/ginkgo/config"
+)
+
+func BuildVersionCommand() *Command {
+       return &Command{
+               Name:         "version",
+               FlagSet:      flag.NewFlagSet("version", flag.ExitOnError),
+               UsageCommand: "ginkgo version",
+               Usage: []string{
+                       "Print Ginkgo's version",
+               },
+               Command: printVersion,
+       }
+}
+
+func printVersion([]string, []string) {
+       fmt.Printf("Ginkgo Version %s\n", config.VERSION)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta.go
new file mode 100644 (file)
index 0000000..6c485c5
--- /dev/null
@@ -0,0 +1,22 @@
+package watch
+
+import "sort"
+
+type Delta struct {
+       ModifiedPackages []string
+
+       NewSuites      []*Suite
+       RemovedSuites  []*Suite
+       modifiedSuites []*Suite
+}
+
+type DescendingByDelta []*Suite
+
+func (a DescendingByDelta) Len() int           { return len(a) }
+func (a DescendingByDelta) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }
+
+func (d Delta) ModifiedSuites() []*Suite {
+       sort.Sort(DescendingByDelta(d.modifiedSuites))
+       return d.modifiedSuites
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
new file mode 100644 (file)
index 0000000..452c07e
--- /dev/null
@@ -0,0 +1,71 @@
+package watch
+
+import (
+       "fmt"
+
+       "github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+type SuiteErrors map[testsuite.TestSuite]error
+
+type DeltaTracker struct {
+       maxDepth      int
+       suites        map[string]*Suite
+       packageHashes *PackageHashes
+}
+
+func NewDeltaTracker(maxDepth int) *DeltaTracker {
+       return &DeltaTracker{
+               maxDepth:      maxDepth,
+               packageHashes: NewPackageHashes(),
+               suites:        map[string]*Suite{},
+       }
+}
+
+func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) {
+       errors = SuiteErrors{}
+       delta.ModifiedPackages = d.packageHashes.CheckForChanges()
+
+       providedSuitePaths := map[string]bool{}
+       for _, suite := range suites {
+               providedSuitePaths[suite.Path] = true
+       }
+
+       d.packageHashes.StartTrackingUsage()
+
+       for _, suite := range d.suites {
+               if providedSuitePaths[suite.Suite.Path] {
+                       if suite.Delta() > 0 {
+                               delta.modifiedSuites = append(delta.modifiedSuites, suite)
+                       }
+               } else {
+                       delta.RemovedSuites = append(delta.RemovedSuites, suite)
+               }
+       }
+
+       d.packageHashes.StopTrackingUsageAndPrune()
+
+       for _, suite := range suites {
+               _, ok := d.suites[suite.Path]
+               if !ok {
+                       s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
+                       if err != nil {
+                               errors[suite] = err
+                               continue
+                       }
+                       d.suites[suite.Path] = s
+                       delta.NewSuites = append(delta.NewSuites, s)
+               }
+       }
+
+       return delta, errors
+}
+
+func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error {
+       s, ok := d.suites[suite.Path]
+       if !ok {
+               return fmt.Errorf("unknown suite %s", suite.Path)
+       }
+
+       return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go
new file mode 100644 (file)
index 0000000..82c25fa
--- /dev/null
@@ -0,0 +1,91 @@
+package watch
+
+import (
+       "go/build"
+       "regexp"
+)
+
+var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
+
+type Dependencies struct {
+       deps map[string]int
+}
+
+func NewDependencies(path string, maxDepth int) (Dependencies, error) {
+       d := Dependencies{
+               deps: map[string]int{},
+       }
+
+       if maxDepth == 0 {
+               return d, nil
+       }
+
+       err := d.seedWithDepsForPackageAtPath(path)
+       if err != nil {
+               return d, err
+       }
+
+       for depth := 1; depth < maxDepth; depth++ {
+               n := len(d.deps)
+               d.addDepsForDepth(depth)
+               if n == len(d.deps) {
+                       break
+               }
+       }
+
+       return d, nil
+}
+
+func (d Dependencies) Dependencies() map[string]int {
+       return d.deps
+}
+
+func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
+       pkg, err := build.ImportDir(path, 0)
+       if err != nil {
+               return err
+       }
+
+       d.resolveAndAdd(pkg.Imports, 1)
+       d.resolveAndAdd(pkg.TestImports, 1)
+       d.resolveAndAdd(pkg.XTestImports, 1)
+
+       delete(d.deps, pkg.Dir)
+       return nil
+}
+
+func (d Dependencies) addDepsForDepth(depth int) {
+       for dep, depDepth := range d.deps {
+               if depDepth == depth {
+                       d.addDepsForDep(dep, depth+1)
+               }
+       }
+}
+
+func (d Dependencies) addDepsForDep(dep string, depth int) {
+       pkg, err := build.ImportDir(dep, 0)
+       if err != nil {
+               println(err.Error())
+               return
+       }
+       d.resolveAndAdd(pkg.Imports, depth)
+}
+
+func (d Dependencies) resolveAndAdd(deps []string, depth int) {
+       for _, dep := range deps {
+               pkg, err := build.Import(dep, ".", 0)
+               if err != nil {
+                       continue
+               }
+               if pkg.Goroot == false && !ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) {
+                       d.addDepIfNotPresent(pkg.Dir, depth)
+               }
+       }
+}
+
+func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
+       _, ok := d.deps[dep]
+       if !ok {
+               d.deps[dep] = depth
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
new file mode 100644 (file)
index 0000000..eaf357c
--- /dev/null
@@ -0,0 +1,103 @@
+package watch
+
+import (
+       "fmt"
+       "io/ioutil"
+       "os"
+       "regexp"
+       "time"
+)
+
+var goRegExp = regexp.MustCompile(`\.go$`)
+var goTestRegExp = regexp.MustCompile(`_test\.go$`)
+
+type PackageHash struct {
+       CodeModifiedTime time.Time
+       TestModifiedTime time.Time
+       Deleted          bool
+
+       path     string
+       codeHash string
+       testHash string
+}
+
+func NewPackageHash(path string) *PackageHash {
+       p := &PackageHash{
+               path: path,
+       }
+
+       p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
+
+       return p
+}
+
+func (p *PackageHash) CheckForChanges() bool {
+       codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
+
+       if deleted {
+               if p.Deleted == false {
+                       t := time.Now()
+                       p.CodeModifiedTime = t
+                       p.TestModifiedTime = t
+               }
+               p.Deleted = true
+               return true
+       }
+
+       modified := false
+       p.Deleted = false
+
+       if p.codeHash != codeHash {
+               p.CodeModifiedTime = codeModifiedTime
+               modified = true
+       }
+       if p.testHash != testHash {
+               p.TestModifiedTime = testModifiedTime
+               modified = true
+       }
+
+       p.codeHash = codeHash
+       p.testHash = testHash
+       return modified
+}
+
+func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
+       infos, err := ioutil.ReadDir(p.path)
+
+       if err != nil {
+               deleted = true
+               return
+       }
+
+       for _, info := range infos {
+               if info.IsDir() {
+                       continue
+               }
+
+               if goTestRegExp.Match([]byte(info.Name())) {
+                       testHash += p.hashForFileInfo(info)
+                       if info.ModTime().After(testModifiedTime) {
+                               testModifiedTime = info.ModTime()
+                       }
+                       continue
+               }
+
+               if goRegExp.Match([]byte(info.Name())) {
+                       codeHash += p.hashForFileInfo(info)
+                       if info.ModTime().After(codeModifiedTime) {
+                               codeModifiedTime = info.ModTime()
+                       }
+               }
+       }
+
+       testHash += codeHash
+       if codeModifiedTime.After(testModifiedTime) {
+               testModifiedTime = codeModifiedTime
+       }
+
+       return
+}
+
+func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
+       return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
+}
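
A sketch of the inexpensive "hash" PackageHash relies on: no file contents are read, so a package is considered changed whenever any .go file's name, size, or modification time changes. The file name below is only an example.

package main

import (
	"fmt"
	"os"
)

func fingerprint(info os.FileInfo) string {
	// Same shape as hashForFileInfo: name, size, and mtime in nanoseconds.
	return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
}

func main() {
	info, err := os.Stat("main.go") // any file will do
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(fingerprint(info))
}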
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go
new file mode 100644 (file)
index 0000000..262eaa8
--- /dev/null
@@ -0,0 +1,82 @@
+package watch
+
+import (
+       "path/filepath"
+       "sync"
+)
+
+type PackageHashes struct {
+       PackageHashes map[string]*PackageHash
+       usedPaths     map[string]bool
+       lock          *sync.Mutex
+}
+
+func NewPackageHashes() *PackageHashes {
+       return &PackageHashes{
+               PackageHashes: map[string]*PackageHash{},
+               usedPaths:     nil,
+               lock:          &sync.Mutex{},
+       }
+}
+
+func (p *PackageHashes) CheckForChanges() []string {
+       p.lock.Lock()
+       defer p.lock.Unlock()
+
+       modified := []string{}
+
+       for _, packageHash := range p.PackageHashes {
+               if packageHash.CheckForChanges() {
+                       modified = append(modified, packageHash.path)
+               }
+       }
+
+       return modified
+}
+
+func (p *PackageHashes) Add(path string) *PackageHash {
+       p.lock.Lock()
+       defer p.lock.Unlock()
+
+       path, _ = filepath.Abs(path)
+       _, ok := p.PackageHashes[path]
+       if !ok {
+               p.PackageHashes[path] = NewPackageHash(path)
+       }
+
+       if p.usedPaths != nil {
+               p.usedPaths[path] = true
+       }
+       return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) Get(path string) *PackageHash {
+       p.lock.Lock()
+       defer p.lock.Unlock()
+
+       path, _ = filepath.Abs(path)
+       if p.usedPaths != nil {
+               p.usedPaths[path] = true
+       }
+       return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) StartTrackingUsage() {
+       p.lock.Lock()
+       defer p.lock.Unlock()
+
+       p.usedPaths = map[string]bool{}
+}
+
+func (p *PackageHashes) StopTrackingUsageAndPrune() {
+       p.lock.Lock()
+       defer p.lock.Unlock()
+
+       for path := range p.PackageHashes {
+               if !p.usedPaths[path] {
+                       delete(p.PackageHashes, path)
+               }
+       }
+
+       p.usedPaths = nil
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/suite.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/suite.go
new file mode 100644 (file)
index 0000000..5deaba7
--- /dev/null
@@ -0,0 +1,87 @@
+package watch
+
+import (
+       "fmt"
+       "math"
+       "time"
+
+       "github.com/onsi/ginkgo/ginkgo/testsuite"
+)
+
+type Suite struct {
+       Suite        testsuite.TestSuite
+       RunTime      time.Time
+       Dependencies Dependencies
+
+       sharedPackageHashes *PackageHashes
+}
+
+func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
+       deps, err := NewDependencies(suite.Path, maxDepth)
+       if err != nil {
+               return nil, err
+       }
+
+       sharedPackageHashes.Add(suite.Path)
+       for dep := range deps.Dependencies() {
+               sharedPackageHashes.Add(dep)
+       }
+
+       return &Suite{
+               Suite:        suite,
+               Dependencies: deps,
+
+               sharedPackageHashes: sharedPackageHashes,
+       }, nil
+}
+
+func (s *Suite) Delta() float64 {
+       delta := s.delta(s.Suite.Path, true, 0) * 1000
+       for dep, depth := range s.Dependencies.Dependencies() {
+               delta += s.delta(dep, false, depth)
+       }
+       return delta
+}
+
+func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
+       s.RunTime = time.Now()
+
+       deps, err := NewDependencies(s.Suite.Path, maxDepth)
+       if err != nil {
+               return err
+       }
+
+       s.sharedPackageHashes.Add(s.Suite.Path)
+       for dep := range deps.Dependencies() {
+               s.sharedPackageHashes.Add(dep)
+       }
+
+       s.Dependencies = deps
+
+       return nil
+}
+
+func (s *Suite) Description() string {
+       numDeps := len(s.Dependencies.Dependencies())
+       pluralizer := "ies"
+       if numDeps == 1 {
+               pluralizer = "y"
+       }
+       return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
+}
+
+func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
+       return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
+}
+
+func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
+       packageHash := s.sharedPackageHashes.Get(packagePath)
+       var modifiedTime time.Time
+       if includeTests {
+               modifiedTime = packageHash.TestModifiedTime
+       } else {
+               modifiedTime = packageHash.CodeModifiedTime
+       }
+
+       return modifiedTime.Sub(s.RunTime)
+}
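
A worked sketch (numbers invented) of the Delta weighting above: the suite's own package contribution is scaled by 1000, and each dependency contributes max(modifiedTime - lastRunTime, 0) divided by depth + 1, so recently changed, close dependencies dominate the watch ordering.

package main

import (
	"fmt"
	"math"
	"time"
)

func contribution(sinceRun time.Duration, depth int) float64 {
	return math.Max(float64(sinceRun), 0) / float64(depth+1)
}

func main() {
	ownPackage := contribution(2*time.Second, 0) * 1000
	depAtDepth1 := contribution(5*time.Second, 1)
	depAtDepth3 := contribution(-1*time.Second, 3) // modified before the last run: contributes nothing

	fmt.Println(ownPackage + depAtDepth1 + depAtDepth3)
}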
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go
new file mode 100644 (file)
index 0000000..03ea012
--- /dev/null
@@ -0,0 +1,172 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "time"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/ginkgo/interrupthandler"
+       "github.com/onsi/ginkgo/ginkgo/testrunner"
+       "github.com/onsi/ginkgo/ginkgo/testsuite"
+       "github.com/onsi/ginkgo/ginkgo/watch"
+)
+
+func BuildWatchCommand() *Command {
+       commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError))
+       interruptHandler := interrupthandler.NewInterruptHandler()
+       notifier := NewNotifier(commandFlags)
+       watcher := &SpecWatcher{
+               commandFlags:     commandFlags,
+               notifier:         notifier,
+               interruptHandler: interruptHandler,
+               suiteRunner:      NewSuiteRunner(notifier, interruptHandler),
+       }
+
+       return &Command{
+               Name:         "watch",
+               FlagSet:      commandFlags.FlagSet,
+               UsageCommand: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
+               Usage: []string{
+                       "Watches the tests in the passed in <PACKAGES> and runs them when changes occur.",
+                       "Any arguments after -- will be passed to the test.",
+               },
+               Command:                   watcher.WatchSpecs,
+               SuppressFlagDocumentation: true,
+               FlagDocSubstitute: []string{
+                       "Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails",
+               },
+       }
+}
+
+type SpecWatcher struct {
+       commandFlags     *RunWatchAndBuildCommandFlags
+       notifier         *Notifier
+       interruptHandler *interrupthandler.InterruptHandler
+       suiteRunner      *SuiteRunner
+}
+
+func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
+       w.commandFlags.computeNodes()
+       w.notifier.VerifyNotificationsAreAvailable()
+
+       w.WatchSuites(args, additionalArgs)
+}
+
+func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner {
+       runners := []*testrunner.TestRunner{}
+
+       for _, suite := range suites {
+               runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
+       }
+
+       return runners
+}
+
+func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
+       suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
+
+       if len(suites) == 0 {
+               complainAndQuit("Found no test suites")
+       }
+
+       fmt.Printf("Identified %d test %s.  Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
+       deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth)
+       delta, errors := deltaTracker.Delta(suites)
+
+       fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
+       for _, suite := range delta.NewSuites {
+               fmt.Println("  " + suite.Description())
+       }
+
+       for suite, err := range errors {
+               fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
+       }
+
+       if len(suites) == 1 {
+               runners := w.runnersForSuites(suites, additionalArgs)
+               w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil)
+               runners[0].CleanUp()
+       }
+
+       ticker := time.NewTicker(time.Second)
+
+       for {
+               select {
+               case <-ticker.C:
+                       suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
+                       delta, _ := deltaTracker.Delta(suites)
+
+                       suitesToRun := []testsuite.TestSuite{}
+
+                       if len(delta.NewSuites) > 0 {
+                               fmt.Printf(greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
+                               for _, suite := range delta.NewSuites {
+                                       suitesToRun = append(suitesToRun, suite.Suite)
+                                       fmt.Println("  " + suite.Description())
+                               }
+                       }
+
+                       modifiedSuites := delta.ModifiedSuites()
+                       if len(modifiedSuites) > 0 {
+                               fmt.Println(greenColor + "\nDetected changes in:" + defaultStyle)
+                               for _, pkg := range delta.ModifiedPackages {
+                                       fmt.Println("  " + pkg)
+                               }
+                               fmt.Printf(greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites)))
+                               for _, suite := range modifiedSuites {
+                                       suitesToRun = append(suitesToRun, suite.Suite)
+                                       fmt.Println("  " + suite.Description())
+                               }
+                               fmt.Println("")
+                       }
+
+                       if len(suitesToRun) > 0 {
+                               w.UpdateSeed()
+                               w.ComputeSuccinctMode(len(suitesToRun))
+                               runners := w.runnersForSuites(suitesToRun, additionalArgs)
+                               result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) {
+                                       deltaTracker.WillRun(suite)
+                               })
+                               for _, runner := range runners {
+                                       runner.CleanUp()
+                               }
+                               if !w.interruptHandler.WasInterrupted() {
+                                       color := redColor
+                                       if result.Passed {
+                                               color = greenColor
+                                       }
+                                       fmt.Println(color + "\nDone.  Resuming watch..." + defaultStyle)
+                               }
+                       }
+
+               case <-w.interruptHandler.C:
+                       return
+               }
+       }
+}
+
+func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) {
+       if config.DefaultReporterConfig.Verbose {
+               config.DefaultReporterConfig.Succinct = false
+               return
+       }
+
+       if w.commandFlags.wasSet("succinct") {
+               return
+       }
+
+       if numSuites == 1 {
+               config.DefaultReporterConfig.Succinct = false
+       }
+
+       if numSuites > 1 {
+               config.DefaultReporterConfig.Succinct = true
+       }
+}
+
+func (w *SpecWatcher) UpdateSeed() {
+       if !w.commandFlags.wasSet("seed") {
+               config.GinkgoConfig.RandomSeed = time.Now().Unix()
+       }
+}
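
The WatchSuites loop above polls on a one-second ticker, re-discovers suites, asks the delta tracker what changed, and re-runs only the new or modified suites until interrupted. A stripped-down sketch of that poll-and-rerun shape (the printed message and the stop timer are stand-ins for the real suite discovery and interrupt handler):

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	stop := time.After(3 * time.Second) // stand-in for the interrupt handler channel

	for {
		select {
		case <-ticker.C:
			// In the real command this is where suites are re-discovered and
			// the delta tracker decides which ones need to be re-run.
			fmt.Println("poll: checking for modified suites...")
		case <-stop:
			return
		}
	}
}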
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go
new file mode 100644 (file)
index 0000000..36f6d8e
--- /dev/null
@@ -0,0 +1,536 @@
+/*
+Ginkgo is a BDD-style testing framework for Golang
+
+The godoc documentation describes Ginkgo's API.  More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
+
+Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
+
+Ginkgo on Github: http://github.com/onsi/ginkgo
+
+Ginkgo is MIT-Licensed
+*/
+package ginkgo
+
+import (
+       "flag"
+       "fmt"
+       "io"
+       "net/http"
+       "os"
+       "strings"
+       "time"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/internal/codelocation"
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/internal/remote"
+       "github.com/onsi/ginkgo/internal/suite"
+       "github.com/onsi/ginkgo/internal/testingtproxy"
+       "github.com/onsi/ginkgo/internal/writer"
+       "github.com/onsi/ginkgo/reporters"
+       "github.com/onsi/ginkgo/reporters/stenographer"
+       "github.com/onsi/ginkgo/types"
+)
+
+const GINKGO_VERSION = config.VERSION
+const GINKGO_PANIC = `
+Your test failed.
+Ginkgo panics to prevent subsequent assertions from running.
+Normally Ginkgo rescues this panic so you shouldn't see it.
+
+But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
+To circumvent this, you should call
+
+       defer GinkgoRecover()
+
+at the top of the goroutine that caused this panic.
+`
+const defaultTimeout = 1
+
+var globalSuite *suite.Suite
+var globalFailer *failer.Failer
+
+func init() {
+       config.Flags(flag.CommandLine, "ginkgo", true)
+       GinkgoWriter = writer.New(os.Stdout)
+       globalFailer = failer.New()
+       globalSuite = suite.New(globalFailer)
+}
+
+//GinkgoWriter implements an io.Writer
+//When running in verbose mode any writes to GinkgoWriter will be immediately printed
+//to stdout.  Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
+//only if the current test fails.
+var GinkgoWriter io.Writer
+
+//The interface by which Ginkgo receives *testing.T
+type GinkgoTestingT interface {
+       Fail()
+}
+
+//GinkgoParallelNode returns the parallel node number for the current ginkgo process
+//The node number is 1-indexed
+func GinkgoParallelNode() int {
+       return config.GinkgoConfig.ParallelNode
+}
+
+//Some matcher libraries or legacy codebases require a *testing.T
+//GinkgoT implements an interface analogous to *testing.T and can be used if
+//the library in question accepts *testing.T through an interface
+//
+// For example, with testify:
+// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
+//
+// Or with gomock:
+// gomock.NewController(GinkgoT())
+//
+// GinkgoT() takes an optional offset argument that can be used to get the
+// correct line number associated with the failure.
+func GinkgoT(optionalOffset ...int) GinkgoTInterface {
+       offset := 3
+       if len(optionalOffset) > 0 {
+               offset = optionalOffset[0]
+       }
+       return testingtproxy.New(GinkgoWriter, Fail, offset)
+}
+
+//The interface returned by GinkgoT().  This covers most of the methods
+//in the testing package's T.
+type GinkgoTInterface interface {
+       Fail()
+       Error(args ...interface{})
+       Errorf(format string, args ...interface{})
+       FailNow()
+       Fatal(args ...interface{})
+       Fatalf(format string, args ...interface{})
+       Log(args ...interface{})
+       Logf(format string, args ...interface{})
+       Failed() bool
+       Parallel()
+       Skip(args ...interface{})
+       Skipf(format string, args ...interface{})
+       SkipNow()
+       Skipped() bool
+}
+
+//Custom Ginkgo test reporters must implement the Reporter interface.
+//
+//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
+//and a SpecSummary just before a spec begins and just after a spec ends
+type Reporter reporters.Reporter
+
+//Asynchronous specs are given a channel of the Done type.  You must close or write to the channel
+//to tell Ginkgo that your async test is done.
+type Done chan<- interface{}
+
+//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
+//     FullTestText: a concatenation of ComponentTexts and the TestText
+//     ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
+//     TestText: the text in the actual It or Measure node
+//     IsMeasurement: true if the current test is a measurement
+//     FileName: the name of the file containing the current test
+//     LineNumber: the line number for the current test
+//     Failed: if the current test has failed, this will be true (useful in an AfterEach)
+type GinkgoTestDescription struct {
+       FullTestText   string
+       ComponentTexts []string
+       TestText       string
+
+       IsMeasurement bool
+
+       FileName   string
+       LineNumber int
+
+       Failed bool
+}
+
+//CurrentGinkgoTestDescription returns information about the current running test.
+func CurrentGinkgoTestDescription() GinkgoTestDescription {
+       summary, ok := globalSuite.CurrentRunningSpecSummary()
+       if !ok {
+               return GinkgoTestDescription{}
+       }
+
+       subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
+
+       return GinkgoTestDescription{
+               ComponentTexts: summary.ComponentTexts[1:],
+               FullTestText:   strings.Join(summary.ComponentTexts[1:], " "),
+               TestText:       summary.ComponentTexts[len(summary.ComponentTexts)-1],
+               IsMeasurement:  summary.IsMeasurement,
+               FileName:       subjectCodeLocation.FileName,
+               LineNumber:     subjectCodeLocation.LineNumber,
+               Failed:         summary.HasFailureState(),
+       }
+}
+
+//Measurement tests receive a Benchmarker.
+//
+//You use the Time() function to time how long the passed in body function takes to run
+//You use the RecordValue() function to track arbitrary numerical measurements.
+//The optional info argument is passed to the test reporter and can be used to
+// provide the measurement data to a custom reporter with context.
+//
+//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
+type Benchmarker interface {
+       Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
+       RecordValue(name string, value float64, info ...interface{})
+}
+
+//RunSpecs is the entry point for the Ginkgo test runner.
+//You must call this within a Golang testing TestX(t *testing.T) function.
+//
+//To bootstrap a test suite you can use the Ginkgo CLI:
+//
+//     ginkgo bootstrap
+func RunSpecs(t GinkgoTestingT, description string) bool {
+       specReporters := []Reporter{buildDefaultReporter()}
+       return RunSpecsWithCustomReporters(t, description, specReporters)
+}
+
+//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
+//RunSpecs() with this method.
+func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
+       specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
+       return RunSpecsWithCustomReporters(t, description, specReporters)
+}
+
+//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
+//RunSpecs() with this method.  Note that parallel tests will not work correctly without the default reporter
+func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
+       writer := GinkgoWriter.(*writer.Writer)
+       writer.SetStream(config.DefaultReporterConfig.Verbose)
+       reporters := make([]reporters.Reporter, len(specReporters))
+       for i, reporter := range specReporters {
+               reporters[i] = reporter
+       }
+       passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
+       if passed && hasFocusedTests {
+               fmt.Println("PASS | FOCUSED")
+               os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+       }
+       return passed
+}
+
+func buildDefaultReporter() Reporter {
+       remoteReportingServer := config.GinkgoConfig.StreamHost
+       if remoteReportingServer == "" {
+               stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
+               return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
+       } else {
+               return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
+       }
+}
+
+//Skip notifies Ginkgo that the current spec should be skipped.
+func Skip(message string, callerSkip ...int) {
+       skip := 0
+       if len(callerSkip) > 0 {
+               skip = callerSkip[0]
+       }
+
+       globalFailer.Skip(message, codelocation.New(skip+1))
+       panic(GINKGO_PANIC)
+}
+
+//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
+func Fail(message string, callerSkip ...int) {
+       skip := 0
+       if len(callerSkip) > 0 {
+               skip = callerSkip[0]
+       }
+
+       globalFailer.Fail(message, codelocation.New(skip+1))
+       panic(GINKGO_PANIC)
+}
+
+//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
+//Since Gomega assertions call Fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
+//calls out to Gomega
+//
+//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
+//further assertions from running.  This panic must be recovered.  Ginkgo does this for you
+//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
+//
+//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
+//way for Ginkgo to rescue the panic.  To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+func GinkgoRecover() {
+       e := recover()
+       if e != nil {
+               globalFailer.Panic(codelocation.New(1), e)
+       }
+}
+
+//Describe blocks allow you to organize your specs.  A Describe block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe and Context blocks.  Describe and Context blocks are functionally
+//equivalent.  The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts.
+func Describe(text string, body func()) bool {
+       globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+       return true
+}
+
+//You can focus the tests within a describe block using FDescribe
+func FDescribe(text string, body func()) bool {
+       globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+       return true
+}
+
+//You can mark the tests within a describe block as pending using PDescribe
+func PDescribe(text string, body func()) bool {
+       globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+       return true
+}
+
+//You can mark the tests within a describe block as pending using XDescribe
+func XDescribe(text string, body func()) bool {
+       globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+       return true
+}
+
+//Context blocks allow you to organize your specs.  A Context block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe and Context blocks.  Describe and Context blocks are functionally
+//equivalent.  The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts.
+func Context(text string, body func()) bool {
+       globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+       return true
+}
+
+//You can focus the tests within a describe block using FContext
+func FContext(text string, body func()) bool {
+       globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+       return true
+}
+
+//You can mark the tests within a describe block as pending using PContext
+func PContext(text string, body func()) bool {
+       globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+       return true
+}
+
+//You can mark the tests within a describe block as pending using XContext
+func XContext(text string, body func()) bool {
+       globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+       return true
+}
+
+//It blocks contain your test code and assertions.  You cannot nest any other Ginkgo blocks
+//within an It block.
+//
+//Ginkgo will normally run It blocks synchronously.  To perform asynchronous tests, pass a
+//function that accepts a Done channel.  When you do this, you can also provide an optional timeout.
+func It(text string, body interface{}, timeout ...float64) bool {
+       globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
+       return true
+}
+
+//You can focus individual Its using FIt
+func FIt(text string, body interface{}, timeout ...float64) bool {
+       globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
+       return true
+}
+
+//You can mark Its as pending using PIt
+func PIt(text string, _ ...interface{}) bool {
+       globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+       return true
+}
+
+//You can mark Its as pending using XIt
+func XIt(text string, _ ...interface{}) bool {
+       globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+       return true
+}
+
+//By allows you to better document large Its.
+//
+//Generally you should try to keep your Its short and to the point.  This is not always possible, however,
+//especially in the context of integration tests that capture a particular workflow.
+//
+//By allows you to document such flows.  By must be called within a runnable node (It, BeforeEach, Measure, etc...)
+//By will simply log the passed in text to the GinkgoWriter.  If By is handed a function it will immediately run the function.
+func By(text string, callbacks ...func()) {
+       preamble := "\x1b[1mSTEP\x1b[0m"
+       if config.DefaultReporterConfig.NoColor {
+               preamble = "STEP"
+       }
+       fmt.Fprintln(GinkgoWriter, preamble+": "+text)
+       if len(callbacks) == 1 {
+               callbacks[0]()
+       }
+       if len(callbacks) > 1 {
+               panic("just one callback per By, please")
+       }
+}
+
+//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
+//and accumulate metrics provided to the Benchmarker by the body function.
+//
+//The body function must have the signature:
+//     func(b Benchmarker)
+func Measure(text string, body interface{}, samples int) bool {
+       globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
+       return true
+}
+
+//You can focus individual Measures using FMeasure
+func FMeasure(text string, body interface{}, samples int) bool {
+       globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
+       return true
+}
+
+//You can mark Measurements as pending using PMeasure
+func PMeasure(text string, _ ...interface{}) bool {
+       globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+       return true
+}
+
+//You can mark Measurements as pending using XMeasure
+func XMeasure(text string, _ ...interface{}) bool {
+       globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+       return true
+}
+
+//BeforeSuite blocks are run just once before any specs are run.  When running in parallel, each
+//parallel node process will call BeforeSuite.
+//
+//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
+//
+//You may only register *one* BeforeSuite handler per test suite.  You typically do so in your bootstrap file at the top level.
+func BeforeSuite(body interface{}, timeout ...float64) bool {
+       globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+       return true
+}
+
+//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
+//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
+//
+//When running in parallel, each parallel node process will call AfterSuite.
+//
+//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
+//
+//You may only register *one* AfterSuite handler per test suite.  You typically do so in your bootstrap file at the top level.
+func AfterSuite(body interface{}, timeout ...float64) bool {
+       globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+       return true
+}
+
+//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
+//nodes when running tests in parallel.  For example, say you have a shared database that you can only start one instance of that
+//must be used in your tests.  When running in parallel, only one node should set up the database and all other nodes should wait
+//until that node is done before running.
+//
+//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments.  The first is only run on parallel node #1.  The second is
+//run on all nodes, but *only* after the first function completes successfully.  Ginkgo also makes it possible to send data from the first function (on Node 1)
+//to the second function (on all the other nodes).
+//
+//The functions have the following signatures.  The first function (which only runs on node 1) has the signature:
+//
+//     func() []byte
+//
+//or, to run asynchronously:
+//
+//     func(done Done) []byte
+//
+//The byte array returned by the first function is then passed to the second function, which has the signature:
+//
+//     func(data []byte)
+//
+//or, to run asynchronously:
+//
+//     func(data []byte, done Done)
+//
+//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
+//
+//     var dbClient db.Client
+//     var dbRunner db.Runner
+//
+//     var _ = SynchronizedBeforeSuite(func() []byte {
+//             dbRunner = db.NewRunner()
+//             err := dbRunner.Start()
+//             Ω(err).ShouldNot(HaveOccurred())
+//             return []byte(dbRunner.URL)
+//     }, func(data []byte) {
+//             dbClient = db.NewClient()
+//             err := dbClient.Connect(string(data))
+//             Ω(err).ShouldNot(HaveOccurred())
+//     })
+func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
+       globalSuite.SetSynchronizedBeforeSuiteNode(
+               node1Body,
+               allNodesBody,
+               codelocation.New(1),
+               parseTimeout(timeout...),
+       )
+       return true
+}
+
+//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
+//external singleton resources shared across nodes when running tests in parallel.
+//
+//SynchronizedAfterSuite accomplishes this by taking *two* function arguments.  The first runs on all nodes.  The second runs only on parallel node #1
+//and *only* after all other nodes have finished and exited.  This ensures that node 1, and any resources it is running, remain alive until
+//all other nodes are finished.
+//
+//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
+//
+//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite.  Here, SynchronizedAfterSuite is used to tear down the shared database
+//only after all nodes have finished:
+//
+//     var _ = SynchronizedAfterSuite(func() {
+//             dbClient.Cleanup()
+//     }, func() {
+//             dbRunner.Stop()
+//     })
+func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
+       globalSuite.SetSynchronizedAfterSuiteNode(
+               allNodesBody,
+               node1Body,
+               codelocation.New(1),
+               parseTimeout(timeout...),
+       )
+       return true
+}
+
+//BeforeEach blocks are run before It blocks.  When multiple BeforeEach blocks are defined in nested
+//Describe and Context blocks the outermost BeforeEach blocks are run first.
+//
+//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
+//a Done channel
+func BeforeEach(body interface{}, timeout ...float64) bool {
+       globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
+       return true
+}
+
+//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks.  For more details,
+//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
+//
+//Like It blocks, JustBeforeEach blocks can be made asynchronous by providing a body function that accepts
+//a Done channel
+func JustBeforeEach(body interface{}, timeout ...float64) bool {
+       globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
+       return true
+}
+
+//AfterEach blocks are run after It blocks.   When multiple AfterEach blocks are defined in nested
+//Describe and Context blocks the innermost AfterEach blocks are run first.
+//
+//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
+//a Done channel
+func AfterEach(body interface{}, timeout ...float64) bool {
+       globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
+       return true
+}
+
+func parseTimeout(timeout ...float64) time.Duration {
+       if len(timeout) == 0 {
+               return time.Duration(defaultTimeout * int64(time.Second))
+       } else {
+               return time.Duration(timeout[0] * float64(time.Second))
+       }
+}
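
As a usage illustration of the DSL above (the package, suite, and spec names here are hypothetical), a suite is bootstrapped from a normal Go test that calls RunSpecs, while Describe/BeforeEach/It register the spec tree through top-level `var _ =` assignments:

package cart_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestCart(t *testing.T) {
	RegisterFailHandler(Fail) // wire Gomega assertion failures into ginkgo.Fail
	RunSpecs(t, "Cart Suite")
}

var _ = Describe("Cart", func() {
	var items []string

	BeforeEach(func() {
		items = []string{"apple"}
	})

	It("starts with one item", func() {
		Expect(items).To(HaveLen(1))
	})
})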
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/integration/integration.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/integration/integration.go
new file mode 100644 (file)
index 0000000..76ab1b7
--- /dev/null
@@ -0,0 +1 @@
+package integration
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/codelocation/code_location.go
new file mode 100644 (file)
index 0000000..fa2f0bf
--- /dev/null
@@ -0,0 +1,32 @@
+package codelocation
+
+import (
+       "regexp"
+       "runtime"
+       "runtime/debug"
+       "strings"
+
+       "github.com/onsi/ginkgo/types"
+)
+
+func New(skip int) types.CodeLocation {
+       _, file, line, _ := runtime.Caller(skip + 1)
+       stackTrace := PruneStack(string(debug.Stack()), skip)
+       return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
+}
+
+func PruneStack(fullStackTrace string, skip int) string {
+       stack := strings.Split(fullStackTrace, "\n")
+       if len(stack) > 2*(skip+1) {
+               stack = stack[2*(skip+1):]
+       }
+       prunedStack := []string{}
+       re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
+       for i := 0; i < len(stack)/2; i++ {
+               if !re.Match([]byte(stack[i*2])) {
+                       prunedStack = append(prunedStack, stack[i*2])
+                       prunedStack = append(prunedStack, stack[i*2+1])
+               }
+       }
+       return strings.Join(prunedStack, "\n")
+}
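
codelocation.New relies on a skip-count convention: each wrapping helper adds one stack frame, so callers pass the number of their own frames to discard. A small standalone sketch of the same convention (the helper names are illustrative):

package main

import (
	"fmt"
	"runtime"
)

// here reports the caller's file:line, skipping `skip` additional frames.
func here(skip int) string {
	_, file, line, _ := runtime.Caller(skip + 1) // +1 skips `here` itself
	return fmt.Sprintf("%s:%d", file, line)
}

func helper() string { return here(1) } // skip helper's own frame

func main() {
	fmt.Println(here(0))  // reports this line in main
	fmt.Println(helper()) // also reports the call site in main
}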
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/containernode/container_node.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/containernode/container_node.go
new file mode 100644 (file)
index 0000000..0737746
--- /dev/null
@@ -0,0 +1,151 @@
+package containernode
+
+import (
+       "math/rand"
+       "sort"
+
+       "github.com/onsi/ginkgo/internal/leafnodes"
+       "github.com/onsi/ginkgo/types"
+)
+
+type subjectOrContainerNode struct {
+       containerNode *ContainerNode
+       subjectNode   leafnodes.SubjectNode
+}
+
+func (n subjectOrContainerNode) text() string {
+       if n.containerNode != nil {
+               return n.containerNode.Text()
+       } else {
+               return n.subjectNode.Text()
+       }
+}
+
+type CollatedNodes struct {
+       Containers []*ContainerNode
+       Subject    leafnodes.SubjectNode
+}
+
+type ContainerNode struct {
+       text         string
+       flag         types.FlagType
+       codeLocation types.CodeLocation
+
+       setupNodes               []leafnodes.BasicNode
+       subjectAndContainerNodes []subjectOrContainerNode
+}
+
+func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
+       return &ContainerNode{
+               text:         text,
+               flag:         flag,
+               codeLocation: codeLocation,
+       }
+}
+
+func (container *ContainerNode) Shuffle(r *rand.Rand) {
+       sort.Sort(container)
+       permutation := r.Perm(len(container.subjectAndContainerNodes))
+       shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
+       for i, j := range permutation {
+               shuffledNodes[i] = container.subjectAndContainerNodes[j]
+       }
+       container.subjectAndContainerNodes = shuffledNodes
+}
+
+func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
+       if node.flag == types.FlagTypePending {
+               return false
+       }
+
+       shouldUnfocus := false
+       for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
+               if subjectOrContainerNode.containerNode != nil {
+                       shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
+               } else {
+                       shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
+               }
+       }
+
+       if shouldUnfocus {
+               if node.flag == types.FlagTypeFocused {
+                       node.flag = types.FlagTypeNone
+               }
+               return true
+       }
+
+       return node.flag == types.FlagTypeFocused
+}
+
+func (node *ContainerNode) Collate() []CollatedNodes {
+       return node.collate([]*ContainerNode{})
+}
+
+func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
+       collated := make([]CollatedNodes, 0)
+
+       containers := make([]*ContainerNode, len(enclosingContainers))
+       copy(containers, enclosingContainers)
+       containers = append(containers, node)
+
+       for _, subjectOrContainer := range node.subjectAndContainerNodes {
+               if subjectOrContainer.containerNode != nil {
+                       collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
+               } else {
+                       collated = append(collated, CollatedNodes{
+                               Containers: containers,
+                               Subject:    subjectOrContainer.subjectNode,
+                       })
+               }
+       }
+
+       return collated
+}
+
+func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
+       node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
+}
+
+func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
+       node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
+}
+
+func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
+       node.setupNodes = append(node.setupNodes, setupNode)
+}
+
+func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
+       nodes := []leafnodes.BasicNode{}
+       for _, setupNode := range node.setupNodes {
+               if setupNode.Type() == nodeType {
+                       nodes = append(nodes, setupNode)
+               }
+       }
+       return nodes
+}
+
+func (node *ContainerNode) Text() string {
+       return node.text
+}
+
+func (node *ContainerNode) CodeLocation() types.CodeLocation {
+       return node.codeLocation
+}
+
+func (node *ContainerNode) Flag() types.FlagType {
+       return node.flag
+}
+
+//sort.Interface
+
+func (node *ContainerNode) Len() int {
+       return len(node.subjectAndContainerNodes)
+}
+
+func (node *ContainerNode) Less(i, j int) bool {
+       return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
+}
+
+func (node *ContainerNode) Swap(i, j int) {
+       node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
+}
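
Shuffle above first sorts the child nodes into a stable baseline order and then permutes them with a caller-supplied *rand.Rand, so the same random seed reproduces the same spec order. A compact standalone sketch of that idea:

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

func main() {
	nodes := []string{"b", "c", "a"}
	sort.Strings(nodes) // stable starting order, as Shuffle does via sort.Sort

	r := rand.New(rand.NewSource(42)) // the suite's random seed
	shuffled := make([]string, len(nodes))
	for i, j := range r.Perm(len(nodes)) {
		shuffled[i] = nodes[j]
	}
	fmt.Println(shuffled) // identical output for the same seed
}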
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/failer/failer.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/failer/failer.go
new file mode 100644 (file)
index 0000000..678ea25
--- /dev/null
@@ -0,0 +1,92 @@
+package failer
+
+import (
+       "fmt"
+       "sync"
+
+       "github.com/onsi/ginkgo/types"
+)
+
+type Failer struct {
+       lock    *sync.Mutex
+       failure types.SpecFailure
+       state   types.SpecState
+}
+
+func New() *Failer {
+       return &Failer{
+               lock:  &sync.Mutex{},
+               state: types.SpecStatePassed,
+       }
+}
+
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       if f.state == types.SpecStatePassed {
+               f.state = types.SpecStatePanicked
+               f.failure = types.SpecFailure{
+                       Message:        "Test Panicked",
+                       Location:       location,
+                       ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
+               }
+       }
+}
+
+func (f *Failer) Timeout(location types.CodeLocation) {
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       if f.state == types.SpecStatePassed {
+               f.state = types.SpecStateTimedOut
+               f.failure = types.SpecFailure{
+                       Message:  "Timed out",
+                       Location: location,
+               }
+       }
+}
+
+func (f *Failer) Fail(message string, location types.CodeLocation) {
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       if f.state == types.SpecStatePassed {
+               f.state = types.SpecStateFailed
+               f.failure = types.SpecFailure{
+                       Message:  message,
+                       Location: location,
+               }
+       }
+}
+
+func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       failure := f.failure
+       outcome := f.state
+       if outcome != types.SpecStatePassed {
+               failure.ComponentType = componentType
+               failure.ComponentIndex = componentIndex
+               failure.ComponentCodeLocation = componentCodeLocation
+       }
+
+       f.state = types.SpecStatePassed
+       f.failure = types.SpecFailure{}
+
+       return failure, outcome
+}
+
+func (f *Failer) Skip(message string, location types.CodeLocation) {
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       if f.state == types.SpecStatePassed {
+               f.state = types.SpecStateSkipped
+               f.failure = types.SpecFailure{
+                       Message:  message,
+                       Location: location,
+               }
+       }
+}
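
Failer implements a first-failure-wins state machine: once a failure is recorded, later Fail/Skip/Timeout/Panic calls are ignored until Drain hands the failure back and resets the state for the next component. A compact standalone illustration of that behaviour (simplified to a single message; this is not the vendored type):

package main

import "fmt"

type state struct {
	failed  bool
	message string
}

func (s *state) fail(msg string) {
	if !s.failed { // only the first failure is recorded
		s.failed, s.message = true, msg
	}
}

func (s *state) drain() (string, bool) {
	msg, failed := s.message, s.failed
	s.failed, s.message = false, "" // reset for the next spec component
	return msg, failed
}

func main() {
	var s state
	s.fail("first assertion failed")
	s.fail("second assertion failed") // ignored
	fmt.Println(s.drain())            // "first assertion failed" true
	fmt.Println(s.drain())            // "" false
}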
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
new file mode 100644 (file)
index 0000000..bc0dd1a
--- /dev/null
@@ -0,0 +1,95 @@
+package leafnodes
+
+import (
+       "math"
+       "time"
+
+       "sync"
+
+       "github.com/onsi/ginkgo/types"
+)
+
+type benchmarker struct {
+       mu           sync.Mutex
+       measurements map[string]*types.SpecMeasurement
+       orderCounter int
+}
+
+func newBenchmarker() *benchmarker {
+       return &benchmarker{
+               measurements: make(map[string]*types.SpecMeasurement, 0),
+       }
+}
+
+func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
+       t := time.Now()
+       body()
+       elapsedTime = time.Since(t)
+
+       b.mu.Lock()
+       defer b.mu.Unlock()
+       measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...)
+       measurement.Results = append(measurement.Results, elapsedTime.Seconds())
+
+       return
+}
+
+func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
+       b.mu.Lock()
+       defer b.mu.Unlock()
+       // getMeasurement mutates the shared measurements map, so acquire the lock before calling it (Time holds it for the same call).
+       measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...)
+       measurement.Results = append(measurement.Results, value)
+}
+
+func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {
+       measurement, ok := b.measurements[name]
+       if !ok {
+               var computedInfo interface{}
+               computedInfo = nil
+               if len(info) > 0 {
+                       computedInfo = info[0]
+               }
+               measurement = &types.SpecMeasurement{
+                       Name:          name,
+                       Info:          computedInfo,
+                       Order:         b.orderCounter,
+                       SmallestLabel: smallestLabel,
+                       LargestLabel:  largestLabel,
+                       AverageLabel:  averageLabel,
+                       Units:         units,
+                       Results:       make([]float64, 0),
+               }
+               b.measurements[name] = measurement
+               b.orderCounter++
+       }
+
+       return measurement
+}
+
+func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
+       b.mu.Lock()
+       defer b.mu.Unlock()
+       for _, measurement := range b.measurements {
+               measurement.Smallest = math.MaxFloat64
+               measurement.Largest = -math.MaxFloat64
+               sum := float64(0)
+               sumOfSquares := float64(0)
+
+               for _, result := range measurement.Results {
+                       if result > measurement.Largest {
+                               measurement.Largest = result
+                       }
+                       if result < measurement.Smallest {
+                               measurement.Smallest = result
+                       }
+                       sum += result
+                       sumOfSquares += result * result
+               }
+
+               n := float64(len(measurement.Results))
+               measurement.Average = sum / n
+               measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
+       }
+
+       return b.measurements
+}
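
measurementsReport above derives the average and standard deviation from a running sum and sum of squares over the recorded results. A standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"math"
)

func main() {
	results := []float64{1.2, 0.9, 1.5, 1.1}

	sum, sumOfSquares := 0.0, 0.0
	for _, r := range results {
		sum += r
		sumOfSquares += r * r
	}

	n := float64(len(results))
	average := sum / n
	stdDev := math.Sqrt(sumOfSquares/n - average*average)
	fmt.Printf("avg=%.3f stddev=%.3f\n", average, stdDev)
}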
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
new file mode 100644 (file)
index 0000000..8c3902d
--- /dev/null
@@ -0,0 +1,19 @@
+package leafnodes
+
+import (
+       "github.com/onsi/ginkgo/types"
+)
+
+type BasicNode interface {
+       Type() types.SpecComponentType
+       Run() (types.SpecState, types.SpecFailure)
+       CodeLocation() types.CodeLocation
+}
+
+type SubjectNode interface {
+       BasicNode
+
+       Text() string
+       Flag() types.FlagType
+       Samples() int
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
new file mode 100644 (file)
index 0000000..c76fe3a
--- /dev/null
@@ -0,0 +1,46 @@
+package leafnodes
+
+import (
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/types"
+       "time"
+)
+
+type ItNode struct {
+       runner *runner
+
+       flag types.FlagType
+       text string
+}
+
+func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
+       return &ItNode{
+               runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
+               flag:   flag,
+               text:   text,
+       }
+}
+
+func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+       return node.runner.run()
+}
+
+func (node *ItNode) Type() types.SpecComponentType {
+       return types.SpecComponentTypeIt
+}
+
+func (node *ItNode) Text() string {
+       return node.text
+}
+
+func (node *ItNode) Flag() types.FlagType {
+       return node.flag
+}
+
+func (node *ItNode) CodeLocation() types.CodeLocation {
+       return node.runner.codeLocation
+}
+
+func (node *ItNode) Samples() int {
+       return 1
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
new file mode 100644 (file)
index 0000000..efc3348
--- /dev/null
@@ -0,0 +1,61 @@
+package leafnodes
+
+import (
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/types"
+       "reflect"
+)
+
+type MeasureNode struct {
+       runner *runner
+
+       text        string
+       flag        types.FlagType
+       samples     int
+       benchmarker *benchmarker
+}
+
+func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
+       benchmarker := newBenchmarker()
+
+       wrappedBody := func() {
+               reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
+       }
+
+       return &MeasureNode{
+               runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
+
+               text:        text,
+               flag:        flag,
+               samples:     samples,
+               benchmarker: benchmarker,
+       }
+}
+
+func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+       return node.runner.run()
+}
+
+func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
+       return node.benchmarker.measurementsReport()
+}
+
+func (node *MeasureNode) Type() types.SpecComponentType {
+       return types.SpecComponentTypeMeasure
+}
+
+func (node *MeasureNode) Text() string {
+       return node.text
+}
+
+func (node *MeasureNode) Flag() types.FlagType {
+       return node.flag
+}
+
+func (node *MeasureNode) CodeLocation() types.CodeLocation {
+       return node.runner.codeLocation
+}
+
+func (node *MeasureNode) Samples() int {
+       return node.samples
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/runner.go
new file mode 100644 (file)
index 0000000..003f851
--- /dev/null
@@ -0,0 +1,113 @@
+package leafnodes
+
+import (
+       "fmt"
+       "github.com/onsi/ginkgo/internal/codelocation"
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/types"
+       "reflect"
+       "time"
+)
+
+type runner struct {
+       isAsync          bool
+       asyncFunc        func(chan<- interface{})
+       syncFunc         func()
+       codeLocation     types.CodeLocation
+       timeoutThreshold time.Duration
+       nodeType         types.SpecComponentType
+       componentIndex   int
+       failer           *failer.Failer
+}
+
+func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
+       bodyType := reflect.TypeOf(body)
+       if bodyType.Kind() != reflect.Func {
+               panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
+       }
+
+       runner := &runner{
+               codeLocation:     codeLocation,
+               timeoutThreshold: timeout,
+               failer:           failer,
+               nodeType:         nodeType,
+               componentIndex:   componentIndex,
+       }
+
+       switch bodyType.NumIn() {
+       case 0:
+               runner.syncFunc = body.(func())
+               return runner
+       case 1:
+               if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
+                       panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
+               }
+
+               wrappedBody := func(done chan<- interface{}) {
+                       bodyValue := reflect.ValueOf(body)
+                       bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
+               }
+
+               runner.isAsync = true
+               runner.asyncFunc = wrappedBody
+               return runner
+       }
+
+       panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
+}
+
+func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
+       if r.isAsync {
+               return r.runAsync()
+       } else {
+               return r.runSync()
+       }
+}
+
+func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
+       done := make(chan interface{}, 1)
+
+       go func() {
+               finished := false
+
+               defer func() {
+                       if e := recover(); e != nil || !finished {
+                               r.failer.Panic(codelocation.New(2), e)
+                               select {
+                               case <-done:
+                                       break
+                               default:
+                                       close(done)
+                               }
+                       }
+               }()
+
+               r.asyncFunc(done)
+               finished = true
+       }()
+
+       select {
+       case <-done:
+       case <-time.After(r.timeoutThreshold):
+               r.failer.Timeout(r.codeLocation)
+       }
+
+       failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
+       return
+}
+func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
+       finished := false
+
+       defer func() {
+               if e := recover(); e != nil || !finished {
+                       r.failer.Panic(codelocation.New(2), e)
+               }
+
+               failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
+       }()
+
+       r.syncFunc()
+       finished = true
+
+       return
+}
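
The runner above chooses its mode from the body's arity: a zero-argument function runs synchronously, while a one-argument function receives a Done channel and is raced against the timeout. A stripped-down sketch of that async race (the body functions here are illustrative):

package main

import (
	"fmt"
	"time"
)

func runAsync(body func(done chan<- interface{}), timeout time.Duration) string {
	done := make(chan interface{}, 1)
	go body(done)

	select {
	case <-done:
		return "passed"
	case <-time.After(timeout):
		return "timed out"
	}
}

func main() {
	fast := func(done chan<- interface{}) { close(done) }
	slow := func(done chan<- interface{}) { time.Sleep(time.Second); close(done) }

	fmt.Println(runAsync(fast, 100*time.Millisecond)) // passed
	fmt.Println(runAsync(slow, 100*time.Millisecond)) // timed out
}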
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
new file mode 100644 (file)
index 0000000..6b725a6
--- /dev/null
@@ -0,0 +1,41 @@
+package leafnodes
+
+import (
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/types"
+       "time"
+)
+
+type SetupNode struct {
+       runner *runner
+}
+
+func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+       return node.runner.run()
+}
+
+func (node *SetupNode) Type() types.SpecComponentType {
+       return node.runner.nodeType
+}
+
+func (node *SetupNode) CodeLocation() types.CodeLocation {
+       return node.runner.codeLocation
+}
+
+func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+       return &SetupNode{
+               runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
+       }
+}
+
+func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+       return &SetupNode{
+               runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
+       }
+}
+
+func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+       return &SetupNode{
+               runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
new file mode 100644 (file)
index 0000000..2ccc7dc
--- /dev/null
@@ -0,0 +1,54 @@
+package leafnodes
+
+import (
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/types"
+       "time"
+)
+
+type SuiteNode interface {
+       Run(parallelNode int, parallelTotal int, syncHost string) bool
+       Passed() bool
+       Summary() *types.SetupSummary
+}
+
+type simpleSuiteNode struct {
+       runner  *runner
+       outcome types.SpecState
+       failure types.SpecFailure
+       runTime time.Duration
+}
+
+func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+       t := time.Now()
+       node.outcome, node.failure = node.runner.run()
+       node.runTime = time.Since(t)
+
+       return node.outcome == types.SpecStatePassed
+}
+
+func (node *simpleSuiteNode) Passed() bool {
+       return node.outcome == types.SpecStatePassed
+}
+
+func (node *simpleSuiteNode) Summary() *types.SetupSummary {
+       return &types.SetupSummary{
+               ComponentType: node.runner.nodeType,
+               CodeLocation:  node.runner.codeLocation,
+               State:         node.outcome,
+               RunTime:       node.runTime,
+               Failure:       node.failure,
+       }
+}
+
+func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+       return &simpleSuiteNode{
+               runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
+       }
+}
+
+func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+       return &simpleSuiteNode{
+               runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
new file mode 100644 (file)
index 0000000..e7030d9
--- /dev/null
@@ -0,0 +1,89 @@
+package leafnodes
+
+import (
+       "encoding/json"
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/types"
+       "io/ioutil"
+       "net/http"
+       "time"
+)
+
+type synchronizedAfterSuiteNode struct {
+       runnerA *runner
+       runnerB *runner
+
+       outcome types.SpecState
+       failure types.SpecFailure
+       runTime time.Duration
+}
+
+func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+       return &synchronizedAfterSuiteNode{
+               runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+               runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+       }
+}
+
+func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+       node.outcome, node.failure = node.runnerA.run()
+
+       if parallelNode == 1 {
+               if parallelTotal > 1 {
+                       node.waitUntilOtherNodesAreDone(syncHost)
+               }
+
+               outcome, failure := node.runnerB.run()
+
+               if node.outcome == types.SpecStatePassed {
+                       node.outcome, node.failure = outcome, failure
+               }
+       }
+
+       return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedAfterSuiteNode) Passed() bool {
+       return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
+       return &types.SetupSummary{
+               ComponentType: node.runnerA.nodeType,
+               CodeLocation:  node.runnerA.codeLocation,
+               State:         node.outcome,
+               RunTime:       node.runTime,
+               Failure:       node.failure,
+       }
+}
+
+func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
+       for {
+               if node.canRun(syncHost) {
+                       return
+               }
+
+               time.Sleep(50 * time.Millisecond)
+       }
+}
+
+func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
+       resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
+       if err != nil || resp.StatusCode != http.StatusOK {
+               return false
+       }
+
+       body, err := ioutil.ReadAll(resp.Body)
+       if err != nil {
+               return false
+       }
+       resp.Body.Close()
+
+       afterSuiteData := types.RemoteAfterSuiteData{}
+       err = json.Unmarshal(body, &afterSuiteData)
+       if err != nil {
+               return false
+       }
+
+       return afterSuiteData.CanRun
+}
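For orientation, here is a minimal sketch of the public DSL this node backs, assuming Ginkgo v1's SynchronizedAfterSuite API (the package name and comments are hypothetical): the first function becomes runnerA and runs on every parallel node; the second becomes runnerB and runs only on node 1 once /RemoteAfterSuiteData reports that all other nodes are done.

package books_test

import (
	. "github.com/onsi/ginkgo"
)

var _ = SynchronizedAfterSuite(func() {
	// runnerA: per-node teardown, runs on every parallel node
}, func() {
	// runnerB: global teardown, runs on node 1 only after the other nodes finish
})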
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
new file mode 100644 (file)
index 0000000..76a9679
--- /dev/null
@@ -0,0 +1,182 @@
+package leafnodes
+
+import (
+       "bytes"
+       "encoding/json"
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/types"
+       "io/ioutil"
+       "net/http"
+       "reflect"
+       "time"
+)
+
+type synchronizedBeforeSuiteNode struct {
+       runnerA *runner
+       runnerB *runner
+
+       data []byte
+
+       outcome types.SpecState
+       failure types.SpecFailure
+       runTime time.Duration
+}
+
+func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+       node := &synchronizedBeforeSuiteNode{}
+
+       node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
+       node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
+
+       return node
+}
+
+func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+       t := time.Now()
+       defer func() {
+               node.runTime = time.Since(t)
+       }()
+
+       if parallelNode == 1 {
+               node.outcome, node.failure = node.runA(parallelTotal, syncHost)
+       } else {
+               node.outcome, node.failure = node.waitForA(syncHost)
+       }
+
+       if node.outcome != types.SpecStatePassed {
+               return false
+       }
+       node.outcome, node.failure = node.runnerB.run()
+
+       return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
+       outcome, failure := node.runnerA.run()
+
+       if parallelTotal > 1 {
+               state := types.RemoteBeforeSuiteStatePassed
+               if outcome != types.SpecStatePassed {
+                       state = types.RemoteBeforeSuiteStateFailed
+               }
+               json := (types.RemoteBeforeSuiteData{
+                       Data:  node.data,
+                       State: state,
+               }).ToJSON()
+               http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
+       }
+
+       return outcome, failure
+}
+
+func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
+       failure := func(message string) types.SpecFailure {
+               return types.SpecFailure{
+                       Message:               message,
+                       Location:              node.runnerA.codeLocation,
+                       ComponentType:         node.runnerA.nodeType,
+                       ComponentIndex:        node.runnerA.componentIndex,
+                       ComponentCodeLocation: node.runnerA.codeLocation,
+               }
+       }
+       for {
+               resp, err := http.Get(syncHost + "/BeforeSuiteState")
+               if err != nil || resp.StatusCode != http.StatusOK {
+                       return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
+               }
+
+               body, err := ioutil.ReadAll(resp.Body)
+               if err != nil {
+                       return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
+               }
+               resp.Body.Close()
+
+               beforeSuiteData := types.RemoteBeforeSuiteData{}
+               err = json.Unmarshal(body, &beforeSuiteData)
+               if err != nil {
+                       return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
+               }
+
+               switch beforeSuiteData.State {
+               case types.RemoteBeforeSuiteStatePassed:
+                       node.data = beforeSuiteData.Data
+                       return types.SpecStatePassed, types.SpecFailure{}
+               case types.RemoteBeforeSuiteStateFailed:
+                       return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
+               case types.RemoteBeforeSuiteStateDisappeared:
+                       return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
+               }
+
+               time.Sleep(50 * time.Millisecond)
+       }
+
+       return types.SpecStateFailed, failure("Shouldn't get here!")
+}
+
+func (node *synchronizedBeforeSuiteNode) Passed() bool {
+       return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
+       return &types.SetupSummary{
+               ComponentType: node.runnerA.nodeType,
+               CodeLocation:  node.runnerA.codeLocation,
+               State:         node.outcome,
+               RunTime:       node.runTime,
+               Failure:       node.failure,
+       }
+}
+
+func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
+       typeA := reflect.TypeOf(bodyA)
+       if typeA.Kind() != reflect.Func {
+               panic("SynchronizedBeforeSuite expects a function as its first argument")
+       }
+
+       takesNothing := typeA.NumIn() == 0
+       takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
+       returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
+
+       if !((takesNothing || takesADoneChannel) && returnsBytes) {
+               panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
+       }
+
+       if takesADoneChannel {
+               return func(done chan<- interface{}) {
+                       out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
+                       node.data = out[0].Interface().([]byte)
+               }
+       }
+
+       return func() {
+               out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
+               node.data = out[0].Interface().([]byte)
+       }
+}
+
+func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
+       typeB := reflect.TypeOf(bodyB)
+       if typeB.Kind() != reflect.Func {
+               panic("SynchronizedBeforeSuite expects a function as its second argument")
+       }
+
+       returnsNothing := typeB.NumOut() == 0
+       takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
+       takesBytesAndDone := typeB.NumIn() == 2 &&
+               typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
+               typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
+
+       if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
+               panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
+       }
+
+       if takesBytesAndDone {
+               return func(done chan<- interface{}) {
+                       reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
+               }
+       }
+
+       return func() {
+               reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
+       }
+}
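A companion sketch for the BeforeSuite side, again assuming Ginkgo v1's public SynchronizedBeforeSuite API: the first function must satisfy wrapA's check (returns []byte, optionally takes a Done channel) and runs on node 1 only; the second must satisfy wrapB's check (takes []byte, optionally a Done channel) and runs on every node with the data node 1 produced, delivered via the /BeforeSuiteState endpoint.

package books_test

import (
	. "github.com/onsi/ginkgo"
)

var _ = SynchronizedBeforeSuite(func() []byte {
	// node 1 only: set up a shared fixture and publish its address (wrapA stores this as node.data)
	return []byte("tcp://127.0.0.1:5432")
}, func(data []byte) {
	// every node: connect to the fixture described by data
})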
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/aggregator.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/aggregator.go
new file mode 100644 (file)
index 0000000..1e34dbf
--- /dev/null
@@ -0,0 +1,250 @@
+/*
+
+Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
+coherently as tests complete.  You shouldn't need to use this in your code.  To run tests in parallel:
+
+       ginkgo -nodes=N
+
+where N is the number of nodes you desire.
+*/
+package remote
+
+import (
+       "time"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/reporters/stenographer"
+       "github.com/onsi/ginkgo/types"
+)
+
+type configAndSuite struct {
+       config  config.GinkgoConfigType
+       summary *types.SuiteSummary
+}
+
+type Aggregator struct {
+       nodeCount    int
+       config       config.DefaultReporterConfigType
+       stenographer stenographer.Stenographer
+       result       chan bool
+
+       suiteBeginnings           chan configAndSuite
+       aggregatedSuiteBeginnings []configAndSuite
+
+       beforeSuites           chan *types.SetupSummary
+       aggregatedBeforeSuites []*types.SetupSummary
+
+       afterSuites           chan *types.SetupSummary
+       aggregatedAfterSuites []*types.SetupSummary
+
+       specCompletions chan *types.SpecSummary
+       completedSpecs  []*types.SpecSummary
+
+       suiteEndings           chan *types.SuiteSummary
+       aggregatedSuiteEndings []*types.SuiteSummary
+       specs                  []*types.SpecSummary
+
+       startTime time.Time
+}
+
+func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
+       aggregator := &Aggregator{
+               nodeCount:    nodeCount,
+               result:       result,
+               config:       config,
+               stenographer: stenographer,
+
+               suiteBeginnings: make(chan configAndSuite, 0),
+               beforeSuites:    make(chan *types.SetupSummary, 0),
+               afterSuites:     make(chan *types.SetupSummary, 0),
+               specCompletions: make(chan *types.SpecSummary, 0),
+               suiteEndings:    make(chan *types.SuiteSummary, 0),
+       }
+
+       go aggregator.mux()
+
+       return aggregator
+}
+
+func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+       aggregator.suiteBeginnings <- configAndSuite{config, summary}
+}
+
+func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+       aggregator.beforeSuites <- setupSummary
+}
+
+func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+       aggregator.afterSuites <- setupSummary
+}
+
+func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
+       //noop
+}
+
+func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
+       aggregator.specCompletions <- specSummary
+}
+
+func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+       aggregator.suiteEndings <- summary
+}
+
+func (aggregator *Aggregator) mux() {
+loop:
+       for {
+               select {
+               case configAndSuite := <-aggregator.suiteBeginnings:
+                       aggregator.registerSuiteBeginning(configAndSuite)
+               case setupSummary := <-aggregator.beforeSuites:
+                       aggregator.registerBeforeSuite(setupSummary)
+               case setupSummary := <-aggregator.afterSuites:
+                       aggregator.registerAfterSuite(setupSummary)
+               case specSummary := <-aggregator.specCompletions:
+                       aggregator.registerSpecCompletion(specSummary)
+               case suite := <-aggregator.suiteEndings:
+                       finished, passed := aggregator.registerSuiteEnding(suite)
+                       if finished {
+                               aggregator.result <- passed
+                               break loop
+                       }
+               }
+       }
+}
+
+func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
+       aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
+
+       if len(aggregator.aggregatedSuiteBeginnings) == 1 {
+               aggregator.startTime = time.Now()
+       }
+
+       if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
+               return
+       }
+
+       aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
+
+       numberOfSpecsToRun := 0
+       totalNumberOfSpecs := 0
+       for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
+               numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
+               totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
+       }
+
+       aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
+       aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
+       aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
+       aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
+       aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
+       aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
+       aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
+       aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
+       aggregator.specs = append(aggregator.specs, specSummary)
+       aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) flushCompletedSpecs() {
+       if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
+               return
+       }
+
+       for _, setupSummary := range aggregator.aggregatedBeforeSuites {
+               aggregator.announceBeforeSuite(setupSummary)
+       }
+
+       for _, specSummary := range aggregator.completedSpecs {
+               aggregator.announceSpec(specSummary)
+       }
+
+       for _, setupSummary := range aggregator.aggregatedAfterSuites {
+               aggregator.announceAfterSuite(setupSummary)
+       }
+
+       aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
+       aggregator.completedSpecs = []*types.SpecSummary{}
+       aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
+}
+
+func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
+       aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
+       if setupSummary.State != types.SpecStatePassed {
+               aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+       }
+}
+
+func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
+       aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
+       if setupSummary.State != types.SpecStatePassed {
+               aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+       }
+}
+
+func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
+       if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
+               aggregator.stenographer.AnnounceSpecWillRun(specSummary)
+       }
+
+       aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
+
+       switch specSummary.State {
+       case types.SpecStatePassed:
+               if specSummary.IsMeasurement {
+                       aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
+               } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
+                       aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
+               } else {
+                       aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
+               }
+
+       case types.SpecStatePending:
+               aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
+       case types.SpecStateSkipped:
+               aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+       case types.SpecStateTimedOut:
+               aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+       case types.SpecStatePanicked:
+               aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+       case types.SpecStateFailed:
+               aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+       }
+}
+
+func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
+       aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
+       if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
+               return false, false
+       }
+
+       aggregatedSuiteSummary := &types.SuiteSummary{}
+       aggregatedSuiteSummary.SuiteSucceeded = true
+
+       for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
+               if suiteSummary.SuiteSucceeded == false {
+                       aggregatedSuiteSummary.SuiteSucceeded = false
+               }
+
+               aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
+               aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
+               aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
+               aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
+               aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
+               aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
+       }
+
+       aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
+
+       aggregator.stenographer.SummarizeFailures(aggregator.specs)
+       aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
+
+       return true, aggregatedSuiteSummary.SuiteSucceeded
+}
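The mux loop above is a plain channel fan-in: nothing is announced until every node has checked in, and the aggregated result is published once every node has reported its suite ending. A stripped-down, standalone sketch of that shape (plain Go, not Ginkgo API; all names are made up for illustration):

package main

import "fmt"

func main() {
	const nodeCount = 3
	endings := make(chan bool)
	result := make(chan bool)

	go func() {
		passed, seen := true, 0
		for ok := range endings {
			passed = passed && ok
			seen++
			if seen == nodeCount {
				result <- passed // publish once every node has reported
				return
			}
		}
	}()

	for i := 0; i < nodeCount; i++ {
		endings <- true
	}
	fmt.Println("suite passed:", <-result)
}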
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
new file mode 100644 (file)
index 0000000..025eb50
--- /dev/null
@@ -0,0 +1,90 @@
+package remote
+
+import (
+       "bytes"
+       "encoding/json"
+       "io"
+       "net/http"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/types"
+)
+
+//An interface to net/http's client to allow the injection of fakes under test
+type Poster interface {
+       Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
+}
+
+/*
+The ForwardingReporter is a Ginkgo reporter that forwards information to
+a Ginkgo remote server.
+
+When streaming parallel test output, this reporter is automatically installed by Ginkgo.
+
+This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
+detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
+in place of Ginkgo's DefaultReporter.
+*/
+
+type ForwardingReporter struct {
+       serverHost        string
+       poster            Poster
+       outputInterceptor OutputInterceptor
+}
+
+func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
+       return &ForwardingReporter{
+               serverHost:        serverHost,
+               poster:            poster,
+               outputInterceptor: outputInterceptor,
+       }
+}
+
+func (reporter *ForwardingReporter) post(path string, data interface{}) {
+       encoded, _ := json.Marshal(data)
+       buffer := bytes.NewBuffer(encoded)
+       reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
+}
+
+func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
+       data := struct {
+               Config  config.GinkgoConfigType `json:"config"`
+               Summary *types.SuiteSummary     `json:"suite-summary"`
+       }{
+               conf,
+               summary,
+       }
+
+       reporter.outputInterceptor.StartInterceptingOutput()
+       reporter.post("/SpecSuiteWillBegin", data)
+}
+
+func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+       output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+       reporter.outputInterceptor.StartInterceptingOutput()
+       setupSummary.CapturedOutput = output
+       reporter.post("/BeforeSuiteDidRun", setupSummary)
+}
+
+func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
+       reporter.post("/SpecWillRun", specSummary)
+}
+
+func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+       output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+       reporter.outputInterceptor.StartInterceptingOutput()
+       specSummary.CapturedOutput = output
+       reporter.post("/SpecDidComplete", specSummary)
+}
+
+func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+       output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+       reporter.outputInterceptor.StartInterceptingOutput()
+       setupSummary.CapturedOutput = output
+       reporter.post("/AfterSuiteDidRun", setupSummary)
+}
+
+func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+       reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+       reporter.post("/SpecSuiteDidEnd", summary)
+}
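Poster deliberately mirrors (*http.Client).Post, so the real client can be handed straight to NewForwardingReporter while tests substitute a fake. A compile-time sketch of that relationship (the interface is restated inline here only because the vendored internal/ package cannot be imported from outside the ginkgo tree):

package remote_sketch

import (
	"io"
	"net/http"
)

// http.DefaultClient already has exactly the Post signature that Poster asks for.
var _ interface {
	Post(url string, bodyType string, body io.Reader) (*http.Response, error)
} = http.DefaultClient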
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
new file mode 100644 (file)
index 0000000..093f451
--- /dev/null
@@ -0,0 +1,10 @@
+package remote
+
+/*
+The OutputInterceptor is used by the ForwardingReporter to
+intercept and capture all stdout and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+       StartInterceptingOutput() error
+       StopInterceptingAndReturnOutput() (string, error)
+}
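Taken together, Poster and OutputInterceptor are the two seams a test of the ForwardingReporter would stub out. A hypothetical in-package sketch of such fakes (names and placement are assumptions, not part of the library):

package remote

import (
	"io"
	"io/ioutil"
	"net/http"
)

// fakePoster records every POST the ForwardingReporter makes so a test can
// assert on the path and payload. The returned response is discarded by post().
type fakePoster struct {
	paths  []string
	bodies [][]byte
}

func (p *fakePoster) Post(url string, bodyType string, body io.Reader) (*http.Response, error) {
	b, _ := ioutil.ReadAll(body)
	p.paths = append(p.paths, url)
	p.bodies = append(p.bodies, b)
	return nil, nil
}

// noopInterceptor satisfies OutputInterceptor without touching file descriptors.
type noopInterceptor struct{}

func (noopInterceptor) StartInterceptingOutput() error                   { return nil }
func (noopInterceptor) StopInterceptingAndReturnOutput() (string, error) { return "", nil }

// Usage in a test: reporter := NewForwardingReporter("http://fake-host", &fakePoster{}, noopInterceptor{})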
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
new file mode 100644 (file)
index 0000000..181b227
--- /dev/null
@@ -0,0 +1,52 @@
+// +build freebsd openbsd netbsd dragonfly darwin linux
+
+package remote
+
+import (
+       "errors"
+       "io/ioutil"
+       "os"
+       "syscall"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+       return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+       redirectFile *os.File
+       intercepting bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+       if interceptor.intercepting {
+               return errors.New("Already intercepting output!")
+       }
+       interceptor.intercepting = true
+
+       var err error
+
+       interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
+       if err != nil {
+               return err
+       }
+
+       syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
+       syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)
+
+       return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+       if !interceptor.intercepting {
+               return "", errors.New("Not intercepting output!")
+       }
+
+       interceptor.redirectFile.Close()
+       output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
+       os.Remove(interceptor.redirectFile.Name())
+
+       interceptor.intercepting = false
+
+       return string(output), err
+}
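A small lifecycle sketch for the Unix interceptor (hypothetical helper in the same package). Worth noting: this version redirects file descriptors 1 and 2 with Dup2 but never restores the originals on Stop, so it is designed to stay in effect for the whole test process rather than be toggled around arbitrary code.

package remote

import "fmt"

// captureExample shows the Start/Stop sequence; the printed line lands in the
// interceptor's temp file and is returned by StopInterceptingAndReturnOutput.
func captureExample() (string, error) {
	interceptor := NewOutputInterceptor()
	if err := interceptor.StartInterceptingOutput(); err != nil {
		return "", err
	}
	fmt.Println("captured, not shown on the terminal")
	return interceptor.StopInterceptingAndReturnOutput()
}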
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
new file mode 100644 (file)
index 0000000..c8f97d9
--- /dev/null
@@ -0,0 +1,33 @@
+// +build windows
+
+package remote
+
+import (
+       "errors"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+       return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+       intercepting bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+       if interceptor.intercepting {
+               return errors.New("Already intercepting output!")
+       }
+       interceptor.intercepting = true
+
+       // not working on windows...
+
+       return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+       // not working on windows...
+       interceptor.intercepting = false
+
+       return "", nil
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/server.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/server.go
new file mode 100644 (file)
index 0000000..b55c681
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package remote
+
+import (
+       "encoding/json"
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/reporters"
+       "github.com/onsi/ginkgo/types"
+       "io/ioutil"
+       "net"
+       "net/http"
+       "sync"
+)
+
+/*
+Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type Server struct {
+       listener        net.Listener
+       reporters       []reporters.Reporter
+       alives          []func() bool
+       lock            *sync.Mutex
+       beforeSuiteData types.RemoteBeforeSuiteData
+       parallelTotal   int
+}
+
+//Create a new server, automatically selecting a port
+func NewServer(parallelTotal int) (*Server, error) {
+       listener, err := net.Listen("tcp", "127.0.0.1:0")
+       if err != nil {
+               return nil, err
+       }
+       return &Server{
+               listener:        listener,
+               lock:            &sync.Mutex{},
+               alives:          make([]func() bool, parallelTotal),
+               beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
+               parallelTotal:   parallelTotal,
+       }, nil
+}
+
+//Start the server.  You don't need to `go s.Start()`, just `s.Start()`
+func (server *Server) Start() {
+       httpServer := &http.Server{}
+       mux := http.NewServeMux()
+       httpServer.Handler = mux
+
+       //streaming endpoints
+       mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
+       mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
+       mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
+       mux.HandleFunc("/SpecWillRun", server.specWillRun)
+       mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
+       mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
+
+       //synchronization endpoints
+       mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
+       mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
+
+       go httpServer.Serve(server.listener)
+}
+
+//Stop the server
+func (server *Server) Close() {
+       server.listener.Close()
+}
+
+//The address the server can be reached at.  Pass this into the `ForwardingReporter`.
+func (server *Server) Address() string {
+       return "http://" + server.listener.Addr().String()
+}
+
+//
+// Streaming Endpoints
+//
+
+//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
+func (server *Server) readAll(request *http.Request) []byte {
+       defer request.Body.Close()
+       body, _ := ioutil.ReadAll(request.Body)
+       return body
+}
+
+func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
+       server.reporters = reporters
+}
+
+func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
+       body := server.readAll(request)
+
+       var data struct {
+               Config  config.GinkgoConfigType `json:"config"`
+               Summary *types.SuiteSummary     `json:"suite-summary"`
+       }
+
+       json.Unmarshal(body, &data)
+
+       for _, reporter := range server.reporters {
+               reporter.SpecSuiteWillBegin(data.Config, data.Summary)
+       }
+}
+
+func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
+       body := server.readAll(request)
+       var setupSummary *types.SetupSummary
+       json.Unmarshal(body, &setupSummary)
+
+       for _, reporter := range server.reporters {
+               reporter.BeforeSuiteDidRun(setupSummary)
+       }
+}
+
+func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
+       body := server.readAll(request)
+       var setupSummary *types.SetupSummary
+       json.Unmarshal(body, &setupSummary)
+
+       for _, reporter := range server.reporters {
+               reporter.AfterSuiteDidRun(setupSummary)
+       }
+}
+
+func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
+       body := server.readAll(request)
+       var specSummary *types.SpecSummary
+       json.Unmarshal(body, &specSummary)
+
+       for _, reporter := range server.reporters {
+               reporter.SpecWillRun(specSummary)
+       }
+}
+
+func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
+       body := server.readAll(request)
+       var specSummary *types.SpecSummary
+       json.Unmarshal(body, &specSummary)
+
+       for _, reporter := range server.reporters {
+               reporter.SpecDidComplete(specSummary)
+       }
+}
+
+func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
+       body := server.readAll(request)
+       var suiteSummary *types.SuiteSummary
+       json.Unmarshal(body, &suiteSummary)
+
+       for _, reporter := range server.reporters {
+               reporter.SpecSuiteDidEnd(suiteSummary)
+       }
+}
+
+//
+// Synchronization Endpoints
+//
+
+func (server *Server) RegisterAlive(node int, alive func() bool) {
+       server.lock.Lock()
+       defer server.lock.Unlock()
+       server.alives[node-1] = alive
+}
+
+func (server *Server) nodeIsAlive(node int) bool {
+       server.lock.Lock()
+       defer server.lock.Unlock()
+       alive := server.alives[node-1]
+       if alive == nil {
+               return true
+       }
+       return alive()
+}
+
+func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+       if request.Method == "POST" {
+               dec := json.NewDecoder(request.Body)
+               dec.Decode(&(server.beforeSuiteData))
+       } else {
+               beforeSuiteData := server.beforeSuiteData
+               if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
+                       beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
+               }
+               enc := json.NewEncoder(writer)
+               enc.Encode(beforeSuiteData)
+       }
+}
+
+func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
+       afterSuiteData := types.RemoteAfterSuiteData{
+               CanRun: true,
+       }
+       for i := 2; i <= server.parallelTotal; i++ {
+               afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
+       }
+
+       enc := json.NewEncoder(writer)
+       enc.Encode(afterSuiteData)
+}
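A sketch of how the streaming pieces plug together (hypothetical in-package helper; in real use the Ginkgo CLI owns the Server and the test process owns the ForwardingReporter): the Server is started on an ephemeral port, the final reporters are registered on it, and a ForwardingReporter pointed at server.Address() posts every event to it over HTTP.

package remote

import (
	"net/http"

	"github.com/onsi/ginkgo/reporters"
)

func wireStreamingExample(finalReporters ...reporters.Reporter) (*Server, *ForwardingReporter, error) {
	server, err := NewServer(1) // parallelTotal = 1 for this sketch
	if err != nil {
		return nil, nil, err
	}
	server.RegisterReporters(finalReporters...)
	server.Start()

	// http.DefaultClient satisfies Poster; NewOutputInterceptor is the
	// platform-specific implementation defined in this package.
	forwarder := NewForwardingReporter(server.Address(), http.DefaultClient, NewOutputInterceptor())
	return server, forwarder, nil
}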
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/index_computer.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/index_computer.go
new file mode 100644 (file)
index 0000000..5a67fc7
--- /dev/null
@@ -0,0 +1,55 @@
+package spec
+
+func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
+       if length == 0 {
+               return 0, 0
+       }
+
+       // We have at least as many nodes as tests. Trivial case.
+       if parallelTotal >= length {
+               if parallelNode > length {
+                       return 0, 0
+               } else {
+                       return parallelNode - 1, 1
+               }
+       }
+
+       // This is the minimum number of tests that a node will be required to run
+       minTestsPerNode := length / parallelTotal
+
+       // This is the maximum number of tests that a node will be required to run.
+       // The algorithm guarantees that this is at least the minimum number
+       // and at most one more.
+       maxTestsPerNode := minTestsPerNode
+       if length%parallelTotal != 0 {
+               maxTestsPerNode++
+       }
+
+       // Number of nodes that will have to run the maximum number of tests per node
+       numMaxLoadNodes := length % parallelTotal
+
+       // Number of nodes that precede the current node and will have to run the maximum number of tests per node
+       var numPrecedingMaxLoadNodes int
+       if parallelNode > numMaxLoadNodes {
+               numPrecedingMaxLoadNodes = numMaxLoadNodes
+       } else {
+               numPrecedingMaxLoadNodes = parallelNode - 1
+       }
+
+       // Number of nodes that precede the current node and will have to run the minimum number of tests per node
+       var numPrecedingMinLoadNodes int
+       if parallelNode <= numMaxLoadNodes {
+               numPrecedingMinLoadNodes = 0
+       } else {
+               numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
+       }
+
+       // Evaluate the test start index and number of tests to run
+       startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
+       if parallelNode > numMaxLoadNodes {
+               count = minTestsPerNode
+       } else {
+               count = maxTestsPerNode
+       }
+       return
+}
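A worked example of the arithmetic above (hypothetical in-package helper): splitting 10 specs across 3 nodes yields one "max load" node with 4 specs and two nodes with 3 specs each, covering indices 0-9 with no gaps or overlap.

package spec

// exampleSplit returns the (startIndex, count) pair for each of 3 nodes
// running a 10-spec suite: [[0 4] [4 3] [7 3]].
func exampleSplit() [][2]int {
	ranges := [][2]int{}
	for node := 1; node <= 3; node++ {
		start, count := ParallelizedIndexRange(10, 3, node)
		ranges = append(ranges, [2]int{start, count})
	}
	return ranges
}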
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/spec.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/spec.go
new file mode 100644 (file)
index 0000000..ef788b7
--- /dev/null
@@ -0,0 +1,197 @@
+package spec
+
+import (
+       "fmt"
+       "io"
+       "time"
+
+       "github.com/onsi/ginkgo/internal/containernode"
+       "github.com/onsi/ginkgo/internal/leafnodes"
+       "github.com/onsi/ginkgo/types"
+)
+
+type Spec struct {
+       subject          leafnodes.SubjectNode
+       focused          bool
+       announceProgress bool
+
+       containers []*containernode.ContainerNode
+
+       state   types.SpecState
+       runTime time.Duration
+       failure types.SpecFailure
+}
+
+func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
+       spec := &Spec{
+               subject:          subject,
+               containers:       containers,
+               focused:          subject.Flag() == types.FlagTypeFocused,
+               announceProgress: announceProgress,
+       }
+
+       spec.processFlag(subject.Flag())
+       for i := len(containers) - 1; i >= 0; i-- {
+               spec.processFlag(containers[i].Flag())
+       }
+
+       return spec
+}
+
+func (spec *Spec) processFlag(flag types.FlagType) {
+       if flag == types.FlagTypeFocused {
+               spec.focused = true
+       } else if flag == types.FlagTypePending {
+               spec.state = types.SpecStatePending
+       }
+}
+
+func (spec *Spec) Skip() {
+       spec.state = types.SpecStateSkipped
+}
+
+func (spec *Spec) Failed() bool {
+       return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
+}
+
+func (spec *Spec) Passed() bool {
+       return spec.state == types.SpecStatePassed
+}
+
+func (spec *Spec) Pending() bool {
+       return spec.state == types.SpecStatePending
+}
+
+func (spec *Spec) Skipped() bool {
+       return spec.state == types.SpecStateSkipped
+}
+
+func (spec *Spec) Focused() bool {
+       return spec.focused
+}
+
+func (spec *Spec) IsMeasurement() bool {
+       return spec.subject.Type() == types.SpecComponentTypeMeasure
+}
+
+func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
+       componentTexts := make([]string, len(spec.containers)+1)
+       componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
+
+       for i, container := range spec.containers {
+               componentTexts[i] = container.Text()
+               componentCodeLocations[i] = container.CodeLocation()
+       }
+
+       componentTexts[len(spec.containers)] = spec.subject.Text()
+       componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
+
+       return &types.SpecSummary{
+               IsMeasurement:          spec.IsMeasurement(),
+               NumberOfSamples:        spec.subject.Samples(),
+               ComponentTexts:         componentTexts,
+               ComponentCodeLocations: componentCodeLocations,
+               State:        spec.state,
+               RunTime:      spec.runTime,
+               Failure:      spec.failure,
+               Measurements: spec.measurementsReport(),
+               SuiteID:      suiteID,
+       }
+}
+
+func (spec *Spec) ConcatenatedString() string {
+       s := ""
+       for _, container := range spec.containers {
+               s += container.Text() + " "
+       }
+
+       return s + spec.subject.Text()
+}
+
+func (spec *Spec) Run(writer io.Writer) {
+       startTime := time.Now()
+       defer func() {
+               spec.runTime = time.Since(startTime)
+       }()
+
+       for sample := 0; sample < spec.subject.Samples(); sample++ {
+               spec.runSample(sample, writer)
+
+               if spec.state != types.SpecStatePassed {
+                       return
+               }
+       }
+}
+
+func (spec *Spec) runSample(sample int, writer io.Writer) {
+       spec.state = types.SpecStatePassed
+       spec.failure = types.SpecFailure{}
+       innerMostContainerIndexToUnwind := -1
+
+       defer func() {
+               for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
+                       container := spec.containers[i]
+                       for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
+                               spec.announceSetupNode(writer, "AfterEach", container, afterEach)
+                               afterEachState, afterEachFailure := afterEach.Run()
+                               if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
+                                       spec.state = afterEachState
+                                       spec.failure = afterEachFailure
+                               }
+                       }
+               }
+       }()
+
+       for i, container := range spec.containers {
+               innerMostContainerIndexToUnwind = i
+               for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
+                       spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
+                       spec.state, spec.failure = beforeEach.Run()
+                       if spec.state != types.SpecStatePassed {
+                               return
+                       }
+               }
+       }
+
+       for _, container := range spec.containers {
+               for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
+                       spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
+                       spec.state, spec.failure = justBeforeEach.Run()
+                       if spec.state != types.SpecStatePassed {
+                               return
+                       }
+               }
+       }
+
+       spec.announceSubject(writer, spec.subject)
+       spec.state, spec.failure = spec.subject.Run()
+}
+
+func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
+       if spec.announceProgress {
+               s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
+               writer.Write([]byte(s))
+       }
+}
+
+func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
+       if spec.announceProgress {
+               nodeType := ""
+               switch subject.Type() {
+               case types.SpecComponentTypeIt:
+                       nodeType = "It"
+               case types.SpecComponentTypeMeasure:
+                       nodeType = "Measure"
+               }
+               s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
+               writer.Write([]byte(s))
+       }
+}
+
+func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
+       if !spec.IsMeasurement() || spec.Failed() {
+               return map[string]*types.SpecMeasurement{}
+       }
+
+       return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/specs.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/specs.go
new file mode 100644 (file)
index 0000000..9c671e3
--- /dev/null
@@ -0,0 +1,122 @@
+package spec
+
+import (
+       "math/rand"
+       "regexp"
+       "sort"
+)
+
+type Specs struct {
+       specs                 []*Spec
+       numberOfOriginalSpecs int
+       hasProgrammaticFocus  bool
+}
+
+func NewSpecs(specs []*Spec) *Specs {
+       return &Specs{
+               specs: specs,
+               numberOfOriginalSpecs: len(specs),
+       }
+}
+
+func (e *Specs) Specs() []*Spec {
+       return e.specs
+}
+
+func (e *Specs) NumberOfOriginalSpecs() int {
+       return e.numberOfOriginalSpecs
+}
+
+func (e *Specs) HasProgrammaticFocus() bool {
+       return e.hasProgrammaticFocus
+}
+
+func (e *Specs) Shuffle(r *rand.Rand) {
+       sort.Sort(e)
+       permutation := r.Perm(len(e.specs))
+       shuffledSpecs := make([]*Spec, len(e.specs))
+       for i, j := range permutation {
+               shuffledSpecs[i] = e.specs[j]
+       }
+       e.specs = shuffledSpecs
+}
+
+func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
+       if focusString == "" && skipString == "" {
+               e.applyProgrammaticFocus()
+       } else {
+               e.applyRegExpFocus(description, focusString, skipString)
+       }
+}
+
+func (e *Specs) applyProgrammaticFocus() {
+       e.hasProgrammaticFocus = false
+       for _, spec := range e.specs {
+               if spec.Focused() && !spec.Pending() {
+                       e.hasProgrammaticFocus = true
+                       break
+               }
+       }
+
+       if e.hasProgrammaticFocus {
+               for _, spec := range e.specs {
+                       if !spec.Focused() {
+                               spec.Skip()
+                       }
+               }
+       }
+}
+
+func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) {
+       for _, spec := range e.specs {
+               matchesFocus := true
+               matchesSkip := false
+
+               toMatch := []byte(description + " " + spec.ConcatenatedString())
+
+               if focusString != "" {
+                       focusFilter := regexp.MustCompile(focusString)
+                       matchesFocus = focusFilter.Match([]byte(toMatch))
+               }
+
+               if skipString != "" {
+                       skipFilter := regexp.MustCompile(skipString)
+                       matchesSkip = skipFilter.Match([]byte(toMatch))
+               }
+
+               if !matchesFocus || matchesSkip {
+                       spec.Skip()
+               }
+       }
+}
+
+func (e *Specs) SkipMeasurements() {
+       for _, spec := range e.specs {
+               if spec.IsMeasurement() {
+                       spec.Skip()
+               }
+       }
+}
+
+func (e *Specs) TrimForParallelization(total int, node int) {
+       startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
+       if count == 0 {
+               e.specs = make([]*Spec, 0)
+       } else {
+               e.specs = e.specs[startIndex : startIndex+count]
+       }
+}
+
+//sort.Interface
+
+func (e *Specs) Len() int {
+       return len(e.specs)
+}
+
+func (e *Specs) Less(i, j int) bool {
+       return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
+}
+
+func (e *Specs) Swap(i, j int) {
+       e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
+}
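The focus/skip decision in applyRegExpFocus boils down to two regexp matches against the suite description plus the spec's concatenated text. A standalone sketch of that rule (plain Go, not Ginkgo API; the example strings are made up):

package main

import (
	"fmt"
	"regexp"
)

// shouldSkip mirrors applyRegExpFocus: a spec is skipped when it fails the
// focus pattern (if any) or matches the skip pattern (if any).
func shouldSkip(toMatch, focus, skip string) bool {
	matchesFocus, matchesSkip := true, false
	if focus != "" {
		matchesFocus = regexp.MustCompile(focus).MatchString(toMatch)
	}
	if skip != "" {
		matchesSkip = regexp.MustCompile(skip).MatchString(toMatch)
	}
	return !matchesFocus || matchesSkip
}

func main() {
	fmt.Println(shouldSkip("Networking DNS resolves hostnames", "DNS", ""))        // false: spec runs
	fmt.Println(shouldSkip("Networking DNS resolves hostnames", "", "Networking")) // true: spec skipped
}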
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/specrunner/random_id.go
new file mode 100644 (file)
index 0000000..a0b8b62
--- /dev/null
@@ -0,0 +1,15 @@
+package specrunner
+
+import (
+       "crypto/rand"
+       "fmt"
+)
+
+func randomID() string {
+       b := make([]byte, 8)
+       _, err := rand.Read(b)
+       if err != nil {
+               return ""
+       }
+       return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
new file mode 100644 (file)
index 0000000..7ca7740
--- /dev/null
@@ -0,0 +1,324 @@
+package specrunner
+
+import (
+       "fmt"
+       "os"
+       "os/signal"
+       "sync"
+       "syscall"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/internal/leafnodes"
+       "github.com/onsi/ginkgo/internal/spec"
+       Writer "github.com/onsi/ginkgo/internal/writer"
+       "github.com/onsi/ginkgo/reporters"
+       "github.com/onsi/ginkgo/types"
+
+       "time"
+)
+
+type SpecRunner struct {
+       description     string
+       beforeSuiteNode leafnodes.SuiteNode
+       specs           *spec.Specs
+       afterSuiteNode  leafnodes.SuiteNode
+       reporters       []reporters.Reporter
+       startTime       time.Time
+       suiteID         string
+       runningSpec     *spec.Spec
+       writer          Writer.WriterInterface
+       config          config.GinkgoConfigType
+       interrupted     bool
+       lock            *sync.Mutex
+}
+
+func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
+       return &SpecRunner{
+               description:     description,
+               beforeSuiteNode: beforeSuiteNode,
+               specs:           specs,
+               afterSuiteNode:  afterSuiteNode,
+               reporters:       reporters,
+               writer:          writer,
+               config:          config,
+               suiteID:         randomID(),
+               lock:            &sync.Mutex{},
+       }
+}
+
+func (runner *SpecRunner) Run() bool {
+       if runner.config.DryRun {
+               runner.performDryRun()
+               return true
+       }
+
+       runner.reportSuiteWillBegin()
+       go runner.registerForInterrupts()
+
+       suitePassed := runner.runBeforeSuite()
+
+       if suitePassed {
+               suitePassed = runner.runSpecs()
+       }
+
+       runner.blockForeverIfInterrupted()
+
+       suitePassed = runner.runAfterSuite() && suitePassed
+
+       runner.reportSuiteDidEnd(suitePassed)
+
+       return suitePassed
+}
+
+func (runner *SpecRunner) performDryRun() {
+       runner.reportSuiteWillBegin()
+
+       if runner.beforeSuiteNode != nil {
+               summary := runner.beforeSuiteNode.Summary()
+               summary.State = types.SpecStatePassed
+               runner.reportBeforeSuite(summary)
+       }
+
+       for _, spec := range runner.specs.Specs() {
+               summary := spec.Summary(runner.suiteID)
+               runner.reportSpecWillRun(summary)
+               if summary.State == types.SpecStateInvalid {
+                       summary.State = types.SpecStatePassed
+               }
+               runner.reportSpecDidComplete(summary, false)
+       }
+
+       if runner.afterSuiteNode != nil {
+               summary := runner.afterSuiteNode.Summary()
+               summary.State = types.SpecStatePassed
+               runner.reportAfterSuite(summary)
+       }
+
+       runner.reportSuiteDidEnd(true)
+}
+
+func (runner *SpecRunner) runBeforeSuite() bool {
+       if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
+               return true
+       }
+
+       runner.writer.Truncate()
+       conf := runner.config
+       passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
+       if !passed {
+               runner.writer.DumpOut()
+       }
+       runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
+       return passed
+}
+
+func (runner *SpecRunner) runAfterSuite() bool {
+       if runner.afterSuiteNode == nil {
+               return true
+       }
+
+       runner.writer.Truncate()
+       conf := runner.config
+       passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
+       if !passed {
+               runner.writer.DumpOut()
+       }
+       runner.reportAfterSuite(runner.afterSuiteNode.Summary())
+       return passed
+}
+
+func (runner *SpecRunner) runSpecs() bool {
+       suiteFailed := false
+       skipRemainingSpecs := false
+       for _, spec := range runner.specs.Specs() {
+               if runner.wasInterrupted() {
+                       return suiteFailed
+               }
+               if skipRemainingSpecs {
+                       spec.Skip()
+               }
+               runner.reportSpecWillRun(spec.Summary(runner.suiteID))
+
+               if !spec.Skipped() && !spec.Pending() {
+                       runner.runningSpec = spec
+                       spec.Run(runner.writer)
+                       runner.runningSpec = nil
+                       if spec.Failed() {
+                               suiteFailed = true
+                       }
+               } else if spec.Pending() && runner.config.FailOnPending {
+                       suiteFailed = true
+               }
+
+               runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
+
+               if spec.Failed() && runner.config.FailFast {
+                       skipRemainingSpecs = true
+               }
+       }
+
+       return !suiteFailed
+}
+
+func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
+       if runner.runningSpec == nil {
+               return nil, false
+       }
+
+       return runner.runningSpec.Summary(runner.suiteID), true
+}
+
+func (runner *SpecRunner) registerForInterrupts() {
+       c := make(chan os.Signal, 1)
+       signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+
+       <-c
+       signal.Stop(c)
+       runner.markInterrupted()
+       go runner.registerForHardInterrupts()
+       runner.writer.DumpOutWithHeader(`
+Received interrupt.  Emitting contents of GinkgoWriter...
+---------------------------------------------------------
+`)
+       if runner.afterSuiteNode != nil {
+               fmt.Fprint(os.Stderr, `
+---------------------------------------------------------
+Received interrupt.  Running AfterSuite...
+^C again to terminate immediately
+`)
+               runner.runAfterSuite()
+       }
+       runner.reportSuiteDidEnd(false)
+       os.Exit(1)
+}
+
+func (runner *SpecRunner) registerForHardInterrupts() {
+       c := make(chan os.Signal, 1)
+       signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+
+       <-c
+       fmt.Fprintln(os.Stderr, "\nReceived second interrupt.  Shutting down.")
+       os.Exit(1)
+}
+
+func (runner *SpecRunner) blockForeverIfInterrupted() {
+       runner.lock.Lock()
+       interrupted := runner.interrupted
+       runner.lock.Unlock()
+
+       if interrupted {
+               select {}
+       }
+}
+
+func (runner *SpecRunner) markInterrupted() {
+       runner.lock.Lock()
+       defer runner.lock.Unlock()
+       runner.interrupted = true
+}
+
+func (runner *SpecRunner) wasInterrupted() bool {
+       runner.lock.Lock()
+       defer runner.lock.Unlock()
+       return runner.interrupted
+}
+
+func (runner *SpecRunner) reportSuiteWillBegin() {
+       runner.startTime = time.Now()
+       summary := runner.summary(true)
+       for _, reporter := range runner.reporters {
+               reporter.SpecSuiteWillBegin(runner.config, summary)
+       }
+}
+
+func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
+       for _, reporter := range runner.reporters {
+               reporter.BeforeSuiteDidRun(summary)
+       }
+}
+
+func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
+       for _, reporter := range runner.reporters {
+               reporter.AfterSuiteDidRun(summary)
+       }
+}
+
+func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
+       runner.writer.Truncate()
+
+       for _, reporter := range runner.reporters {
+               reporter.SpecWillRun(summary)
+       }
+}
+
+func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
+       for i := len(runner.reporters) - 1; i >= 1; i-- {
+               runner.reporters[i].SpecDidComplete(summary)
+       }
+
+       if failed {
+               runner.writer.DumpOut()
+       }
+
+       runner.reporters[0].SpecDidComplete(summary)
+}
+
+func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
+       summary := runner.summary(success)
+       summary.RunTime = time.Since(runner.startTime)
+       for _, reporter := range runner.reporters {
+               reporter.SpecSuiteDidEnd(summary)
+       }
+}
+
+func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
+       count = 0
+
+       for _, spec := range runner.specs.Specs() {
+               if filter(spec) {
+                       count++
+               }
+       }
+
+       return count
+}
+
+func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
+       numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+               return !ex.Skipped() && !ex.Pending()
+       })
+
+       numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+               return ex.Pending()
+       })
+
+       numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+               return ex.Skipped()
+       })
+
+       numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+               return ex.Passed()
+       })
+
+       numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+               return ex.Failed()
+       })
+
+       if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
+               numberOfFailedSpecs = numberOfSpecsThatWillBeRun
+       }
+
+       return &types.SuiteSummary{
+               SuiteDescription: runner.description,
+               SuiteSucceeded:   success,
+               SuiteID:          runner.suiteID,
+
+               NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
+               NumberOfTotalSpecs:                 len(runner.specs.Specs()),
+               NumberOfSpecsThatWillBeRun:         numberOfSpecsThatWillBeRun,
+               NumberOfPendingSpecs:               numberOfPendingSpecs,
+               NumberOfSkippedSpecs:               numberOfSkippedSpecs,
+               NumberOfPassedSpecs:                numberOfPassedSpecs,
+               NumberOfFailedSpecs:                numberOfFailedSpecs,
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/suite/suite.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/suite/suite.go
new file mode 100644 (file)
index 0000000..a054602
--- /dev/null
@@ -0,0 +1,171 @@
+package suite
+
+import (
+       "math/rand"
+       "time"
+
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/internal/containernode"
+       "github.com/onsi/ginkgo/internal/failer"
+       "github.com/onsi/ginkgo/internal/leafnodes"
+       "github.com/onsi/ginkgo/internal/spec"
+       "github.com/onsi/ginkgo/internal/specrunner"
+       "github.com/onsi/ginkgo/internal/writer"
+       "github.com/onsi/ginkgo/reporters"
+       "github.com/onsi/ginkgo/types"
+)
+
+type ginkgoTestingT interface {
+       Fail()
+}
+
+type Suite struct {
+       topLevelContainer *containernode.ContainerNode
+       currentContainer  *containernode.ContainerNode
+       containerIndex    int
+       beforeSuiteNode   leafnodes.SuiteNode
+       afterSuiteNode    leafnodes.SuiteNode
+       runner            *specrunner.SpecRunner
+       failer            *failer.Failer
+       running           bool
+}
+
+func New(failer *failer.Failer) *Suite {
+       topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
+
+       return &Suite{
+               topLevelContainer: topLevelContainer,
+               currentContainer:  topLevelContainer,
+               failer:            failer,
+               containerIndex:    1,
+       }
+}
+
+func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
+       if config.ParallelTotal < 1 {
+               panic("ginkgo.parallel.total must be >= 1")
+       }
+
+       if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
+               panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
+       }
+
+       r := rand.New(rand.NewSource(config.RandomSeed))
+       suite.topLevelContainer.Shuffle(r)
+       specs := suite.generateSpecs(description, config)
+       suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)
+
+       suite.running = true
+       success := suite.runner.Run()
+       if !success {
+               t.Fail()
+       }
+       return success, specs.HasProgrammaticFocus()
+}
+
+func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
+       specsSlice := []*spec.Spec{}
+       suite.topLevelContainer.BackPropagateProgrammaticFocus()
+       for _, collatedNodes := range suite.topLevelContainer.Collate() {
+               specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
+       }
+
+       specs := spec.NewSpecs(specsSlice)
+
+       if config.RandomizeAllSpecs {
+               specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
+       }
+
+       specs.ApplyFocus(description, config.FocusString, config.SkipString)
+
+       if config.SkipMeasurements {
+               specs.SkipMeasurements()
+       }
+
+       if config.ParallelTotal > 1 {
+               specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
+       }
+
+       return specs
+}
+
+func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
+       return suite.runner.CurrentSpecSummary()
+}
+
+func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+       if suite.beforeSuiteNode != nil {
+               panic("You may only call BeforeSuite once!")
+       }
+       suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+       if suite.afterSuiteNode != nil {
+               panic("You may only call AfterSuite once!")
+       }
+       suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+       if suite.beforeSuiteNode != nil {
+               panic("You may only call BeforeSuite once!")
+       }
+       suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+       if suite.afterSuiteNode != nil {
+               panic("You may only call AfterSuite once!")
+       }
+       suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
+       container := containernode.New(text, flag, codeLocation)
+       suite.currentContainer.PushContainerNode(container)
+
+       previousContainer := suite.currentContainer
+       suite.currentContainer = container
+       suite.containerIndex++
+
+       body()
+
+       suite.containerIndex--
+       suite.currentContainer = previousContainer
+}
+
+func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
+       if suite.running {
+               suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
+       }
+       suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
+       if suite.running {
+               suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
+       }
+       suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+       if suite.running {
+               suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
+       }
+       suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+       if suite.running {
+               suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
+       }
+       suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+       if suite.running {
+               suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
+       }
+       suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
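
For orientation, a rough sketch (not part of this change, and only compilable from inside the ginkgo tree since these are internal packages) of how the Suite, Failer, Writer and SpecRunner fit together. The real wiring lives in ginkgo's public DSL; runExampleSuite and the package name below are made up, and the config values are set by hand because the command-line flag defaults are not applied here:

    package example

    import (
        "os"
        "testing"

        "github.com/onsi/ginkgo/config"
        "github.com/onsi/ginkgo/internal/failer"
        "github.com/onsi/ginkgo/internal/suite"
        "github.com/onsi/ginkgo/internal/writer"
        "github.com/onsi/ginkgo/reporters"
        "github.com/onsi/ginkgo/reporters/stenographer"
        "github.com/onsi/ginkgo/types"
    )

    // runExampleSuite wires the pieces together by hand: a Failer collects
    // failures, a Writer buffers per-spec output, and the Suite turns the
    // pushed containers and Its into a SpecRunner driven by the reporters.
    func runExampleSuite(t *testing.T) bool {
        f := failer.New()
        w := writer.New(os.Stdout)
        s := suite.New(f)

        // Register a single trivial spec against the top-level container.
        s.PushItNode("does nothing", func() {}, types.FlagTypeNone, types.CodeLocation{}, 0)

        // The parallel fields are normally filled in from command-line flags;
        // they are set explicitly here because Run panics if they are zero.
        cfg := config.GinkgoConfig
        cfg.ParallelTotal = 1
        cfg.ParallelNode = 1

        rep := reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer.New(true))
        passed, _ := s.Run(t, "Example Suite", []reporters.Reporter{rep}, w, cfg)
        return passed
    }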
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
new file mode 100644 (file)
index 0000000..a2b9af8
--- /dev/null
@@ -0,0 +1,76 @@
+package testingtproxy
+
+import (
+       "fmt"
+       "io"
+)
+
+type failFunc func(message string, callerSkip ...int)
+
+func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
+       return &ginkgoTestingTProxy{
+               fail:   fail,
+               offset: offset,
+               writer: writer,
+       }
+}
+
+type ginkgoTestingTProxy struct {
+       fail   failFunc
+       offset int
+       writer io.Writer
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+       t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+       t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fail() {
+       t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) FailNow() {
+       t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+       t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+       t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+       fmt.Fprintln(t.writer, args...)
+}
+
+func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+       fmt.Fprintf(t.writer, format, args...)
+}
+
+func (t *ginkgoTestingTProxy) Failed() bool {
+       return false
+}
+
+func (t *ginkgoTestingTProxy) Parallel() {
+}
+
+func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+       fmt.Println(args...)
+}
+
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+       fmt.Printf(format, args...)
+}
+
+func (t *ginkgoTestingTProxy) SkipNow() {
+}
+
+func (t *ginkgoTestingTProxy) Skipped() bool {
+       return false
+}
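
For context, a hedged sketch of how this proxy is meant to be used. In the real library the public GinkgoT() helper performs this wiring; the snippet below uses made-up names, only compiles from inside the ginkgo tree, and is only meaningful when called from within a running spec, since ginkgo.Fail aborts the current spec:

    package example

    import (
        "github.com/onsi/ginkgo"
        "github.com/onsi/ginkgo/internal/testingtproxy"
    )

    // proxyDemo builds a *testing.T-shaped value whose log output goes to
    // GinkgoWriter and whose failures are routed through ginkgo.Fail. The
    // offset argument controls how many stack frames Fail skips when
    // reporting the failure's code location; 0 is just a placeholder.
    func proxyDemo() {
        t := testingtproxy.New(ginkgo.GinkgoWriter, ginkgo.Fail, 0)
        t.Logf("checked %d items\n", 3)       // written to GinkgoWriter
        t.Errorf("expected %d, got %d", 1, 2) // reported as a spec failure
    }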
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/writer/fake_writer.go
new file mode 100644 (file)
index 0000000..ac6540f
--- /dev/null
@@ -0,0 +1,31 @@
+package writer
+
+type FakeGinkgoWriter struct {
+       EventStream []string
+}
+
+func NewFake() *FakeGinkgoWriter {
+       return &FakeGinkgoWriter{
+               EventStream: []string{},
+       }
+}
+
+func (writer *FakeGinkgoWriter) AddEvent(event string) {
+       writer.EventStream = append(writer.EventStream, event)
+}
+
+func (writer *FakeGinkgoWriter) Truncate() {
+       writer.EventStream = append(writer.EventStream, "TRUNCATE")
+}
+
+func (writer *FakeGinkgoWriter) DumpOut() {
+       writer.EventStream = append(writer.EventStream, "DUMP")
+}
+
+func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
+       writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
+}
+
+func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
+       return 0, nil
+}
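
A small sketch of the fake's intended use in the package's own tests: the EventStream records operations in order, so specs can assert that Truncate and DumpOut happen at the right points relative to other events.

    package writer_test

    import (
        "fmt"

        "github.com/onsi/ginkgo/internal/writer"
    )

    func ExampleFakeGinkgoWriter() {
        fake := writer.NewFake()
        fake.AddEvent("spec started")
        fake.Truncate()
        fake.DumpOut()

        // Every operation is appended to EventStream in call order.
        fmt.Println(fake.EventStream)
        // Output: [spec started TRUNCATE DUMP]
    }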
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/writer/writer.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/writer/writer.go
new file mode 100644 (file)
index 0000000..7678fc1
--- /dev/null
@@ -0,0 +1,71 @@
+package writer
+
+import (
+       "bytes"
+       "io"
+       "sync"
+)
+
+type WriterInterface interface {
+       io.Writer
+
+       Truncate()
+       DumpOut()
+       DumpOutWithHeader(header string)
+}
+
+type Writer struct {
+       buffer    *bytes.Buffer
+       outWriter io.Writer
+       lock      *sync.Mutex
+       stream    bool
+}
+
+func New(outWriter io.Writer) *Writer {
+       return &Writer{
+               buffer:    &bytes.Buffer{},
+               lock:      &sync.Mutex{},
+               outWriter: outWriter,
+               stream:    true,
+       }
+}
+
+func (w *Writer) SetStream(stream bool) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       w.stream = stream
+}
+
+func (w *Writer) Write(b []byte) (n int, err error) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       if w.stream {
+               return w.outWriter.Write(b)
+       } else {
+               return w.buffer.Write(b)
+       }
+}
+
+func (w *Writer) Truncate() {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       w.buffer.Reset()
+}
+
+func (w *Writer) DumpOut() {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       if !w.stream {
+               w.buffer.WriteTo(w.outWriter)
+       }
+}
+
+func (w *Writer) DumpOutWithHeader(header string) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       if !w.stream && w.buffer.Len() > 0 {
+               w.outWriter.Write([]byte(header))
+               w.buffer.WriteTo(w.outWriter)
+       }
+}
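
In short, the Writer streams straight to its output by default and only buffers once SetStream(false) has been called; DumpOut then flushes the buffer (typically after a spec failure) and Truncate discards it. A minimal sketch, assuming it runs from inside the ginkgo tree:

    package main

    import (
        "os"

        "github.com/onsi/ginkgo/internal/writer"
    )

    func main() {
        w := writer.New(os.Stdout)

        // In streaming mode (the default), writes pass straight through.
        w.Write([]byte("streamed immediately\n"))

        // With streaming off, writes accumulate in the internal buffer...
        w.SetStream(false)
        w.Write([]byte("buffered output\n"))

        // ...and are only emitted on DumpOut.
        w.DumpOut()

        // Truncate discards whatever is currently buffered.
        w.Write([]byte("discarded\n"))
        w.Truncate()
    }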
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/default_reporter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/default_reporter.go
new file mode 100644 (file)
index 0000000..044d2df
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+Ginkgo's Default Reporter
+
+A number of command line flags are available to tweak Ginkgo's default output.
+
+These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
+*/
+package reporters
+
+import (
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/reporters/stenographer"
+       "github.com/onsi/ginkgo/types"
+)
+
+type DefaultReporter struct {
+       config        config.DefaultReporterConfigType
+       stenographer  stenographer.Stenographer
+       specSummaries []*types.SpecSummary
+}
+
+func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
+       return &DefaultReporter{
+               config:       config,
+               stenographer: stenographer,
+       }
+}
+
+func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+       reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
+       if config.ParallelTotal > 1 {
+               reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
+       }
+       reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
+}
+
+func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+       if setupSummary.State != types.SpecStatePassed {
+               reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
+       }
+}
+
+func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+       if setupSummary.State != types.SpecStatePassed {
+               reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
+       }
+}
+
+func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
+       if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
+               reporter.stenographer.AnnounceSpecWillRun(specSummary)
+       }
+}
+
+func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+       switch specSummary.State {
+       case types.SpecStatePassed:
+               if specSummary.IsMeasurement {
+                       reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
+               } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
+                       reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
+               } else {
+                       reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
+               }
+       case types.SpecStatePending:
+               reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
+       case types.SpecStateSkipped:
+               reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+       case types.SpecStateTimedOut:
+               reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+       case types.SpecStatePanicked:
+               reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+       case types.SpecStateFailed:
+               reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+       }
+
+       reporter.specSummaries = append(reporter.specSummaries, specSummary)
+}
+
+func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+       reporter.stenographer.SummarizeFailures(reporter.specSummaries)
+       reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/fake_reporter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/fake_reporter.go
new file mode 100644 (file)
index 0000000..27db479
--- /dev/null
@@ -0,0 +1,59 @@
+package reporters
+
+import (
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/types"
+)
+
+//FakeReporter is useful for testing purposes
+type FakeReporter struct {
+       Config config.GinkgoConfigType
+
+       BeginSummary         *types.SuiteSummary
+       BeforeSuiteSummary   *types.SetupSummary
+       SpecWillRunSummaries []*types.SpecSummary
+       SpecSummaries        []*types.SpecSummary
+       AfterSuiteSummary    *types.SetupSummary
+       EndSummary           *types.SuiteSummary
+
+       SpecWillRunStub     func(specSummary *types.SpecSummary)
+       SpecDidCompleteStub func(specSummary *types.SpecSummary)
+}
+
+func NewFakeReporter() *FakeReporter {
+       return &FakeReporter{
+               SpecWillRunSummaries: make([]*types.SpecSummary, 0),
+               SpecSummaries:        make([]*types.SpecSummary, 0),
+       }
+}
+
+func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+       fakeR.Config = config
+       fakeR.BeginSummary = summary
+}
+
+func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+       fakeR.BeforeSuiteSummary = setupSummary
+}
+
+func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
+       if fakeR.SpecWillRunStub != nil {
+               fakeR.SpecWillRunStub(specSummary)
+       }
+       fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
+}
+
+func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+       if fakeR.SpecDidCompleteStub != nil {
+               fakeR.SpecDidCompleteStub(specSummary)
+       }
+       fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
+}
+
+func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+       fakeR.AfterSuiteSummary = setupSummary
+}
+
+func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+       fakeR.EndSummary = summary
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/junit_reporter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/junit_reporter.go
new file mode 100644 (file)
index 0000000..278a88e
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+
+JUnit XML Reporter for Ginkgo
+
+For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
+
+*/
+
+package reporters
+
+import (
+       "encoding/xml"
+       "fmt"
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/types"
+       "os"
+       "strings"
+)
+
+type JUnitTestSuite struct {
+       XMLName   xml.Name        `xml:"testsuite"`
+       TestCases []JUnitTestCase `xml:"testcase"`
+       Tests     int             `xml:"tests,attr"`
+       Failures  int             `xml:"failures,attr"`
+       Time      float64         `xml:"time,attr"`
+}
+
+type JUnitTestCase struct {
+       Name           string               `xml:"name,attr"`
+       ClassName      string               `xml:"classname,attr"`
+       FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
+       Skipped        *JUnitSkipped        `xml:"skipped,omitempty"`
+       Time           float64              `xml:"time,attr"`
+}
+
+type JUnitFailureMessage struct {
+       Type    string `xml:"type,attr"`
+       Message string `xml:",chardata"`
+}
+
+type JUnitSkipped struct {
+       XMLName xml.Name `xml:"skipped"`
+}
+
+type JUnitReporter struct {
+       suite         JUnitTestSuite
+       filename      string
+       testSuiteName string
+}
+
+//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed-in filename.
+func NewJUnitReporter(filename string) *JUnitReporter {
+       return &JUnitReporter{
+               filename: filename,
+       }
+}
+
+func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+       reporter.suite = JUnitTestSuite{
+               Tests:     summary.NumberOfSpecsThatWillBeRun,
+               TestCases: []JUnitTestCase{},
+       }
+       reporter.testSuiteName = summary.SuiteDescription
+}
+
+func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
+}
+
+func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+       reporter.handleSetupSummary("BeforeSuite", setupSummary)
+}
+
+func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+       reporter.handleSetupSummary("AfterSuite", setupSummary)
+}
+
+func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
+       if setupSummary.State != types.SpecStatePassed {
+               testCase := JUnitTestCase{
+                       Name:      name,
+                       ClassName: reporter.testSuiteName,
+               }
+
+               testCase.FailureMessage = &JUnitFailureMessage{
+                       Type:    reporter.failureTypeForState(setupSummary.State),
+                       Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message),
+               }
+               testCase.Time = setupSummary.RunTime.Seconds()
+               reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
+       }
+}
+
+func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+       testCase := JUnitTestCase{
+               Name:      strings.Join(specSummary.ComponentTexts[1:], " "),
+               ClassName: reporter.testSuiteName,
+       }
+       if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
+               testCase.FailureMessage = &JUnitFailureMessage{
+                       Type:    reporter.failureTypeForState(specSummary.State),
+                       Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message),
+               }
+       }
+       if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
+               testCase.Skipped = &JUnitSkipped{}
+       }
+       testCase.Time = specSummary.RunTime.Seconds()
+       reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
+}
+
+func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+       reporter.suite.Time = summary.RunTime.Seconds()
+       reporter.suite.Failures = summary.NumberOfFailedSpecs
+       file, err := os.Create(reporter.filename)
+       if err != nil {
+               fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
+       }
+       defer file.Close()
+       file.WriteString(xml.Header)
+       encoder := xml.NewEncoder(file)
+       encoder.Indent("  ", "    ")
+       err = encoder.Encode(reporter.suite)
+       if err != nil {
+               fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
+       }
+}
+
+func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
+       switch state {
+       case types.SpecStateFailed:
+               return "Failure"
+       case types.SpecStateTimedOut:
+               return "Timeout"
+       case types.SpecStatePanicked:
+               return "Panic"
+       default:
+               return ""
+       }
+}
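
A hedged usage sketch, following the pattern documented at the URL above; the suite, package, and file names are made up:

    package books_test

    import (
        "testing"

        . "github.com/onsi/ginkgo"
        "github.com/onsi/ginkgo/reporters"
        . "github.com/onsi/gomega"
    )

    func TestBooks(t *testing.T) {
        RegisterFailHandler(Fail)
        // Write a junit.xml report alongside the default console output.
        junitReporter := reporters.NewJUnitReporter("junit.xml")
        RunSpecsWithDefaultAndCustomReporters(t, "Books Suite", []Reporter{junitReporter})
    }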
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/reporter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/reporter.go
new file mode 100644 (file)
index 0000000..348b9df
--- /dev/null
@@ -0,0 +1,15 @@
+package reporters
+
+import (
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/types"
+)
+
+type Reporter interface {
+       SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
+       BeforeSuiteDidRun(setupSummary *types.SetupSummary)
+       SpecWillRun(specSummary *types.SpecSummary)
+       SpecDidComplete(specSummary *types.SpecSummary)
+       AfterSuiteDidRun(setupSummary *types.SetupSummary)
+       SpecSuiteDidEnd(summary *types.SuiteSummary)
+}
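
Any type implementing these six callbacks can be passed to the spec runner alongside the default reporter. A skeletal, illustrative implementation (names are made up):

    package example

    import (
        "fmt"

        "github.com/onsi/ginkgo/config"
        "github.com/onsi/ginkgo/reporters"
        "github.com/onsi/ginkgo/types"
    )

    // countingReporter is a minimal Reporter that only tallies completed specs.
    type countingReporter struct {
        completed int
    }

    func (r *countingReporter) SpecSuiteWillBegin(cfg config.GinkgoConfigType, summary *types.SuiteSummary) {}
    func (r *countingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary)                          {}
    func (r *countingReporter) SpecWillRun(specSummary *types.SpecSummary)                                  {}

    func (r *countingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
        r.completed++
    }

    func (r *countingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {}

    func (r *countingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
        fmt.Printf("saw %d specs\n", r.completed)
    }

    // Compile-time check that the interface is satisfied.
    var _ reporters.Reporter = (*countingReporter)(nil)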
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
new file mode 100644 (file)
index 0000000..ce5433a
--- /dev/null
@@ -0,0 +1,64 @@
+package stenographer
+
+import (
+       "fmt"
+       "strings"
+)
+
+func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
+       var out string
+
+       if len(args) > 0 {
+               out = fmt.Sprintf(format, args...)
+       } else {
+               out = format
+       }
+
+       if s.color {
+               return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
+       } else {
+               return out
+       }
+}
+
+func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
+       fmt.Println(text)
+       fmt.Println(strings.Repeat(bannerCharacter, len(text)))
+}
+
+func (s *consoleStenographer) printNewLine() {
+       fmt.Println("")
+}
+
+func (s *consoleStenographer) printDelimiter() {
+       fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
+}
+
+func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
+       fmt.Print(s.indent(indentation, format, args...))
+}
+
+func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
+       fmt.Println(s.indent(indentation, format, args...))
+}
+
+func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
+       var text string
+
+       if len(args) > 0 {
+               text = fmt.Sprintf(format, args...)
+       } else {
+               text = format
+       }
+
+       stringArray := strings.Split(text, "\n")
+       padding := ""
+       if indentation >= 0 {
+               padding = strings.Repeat("  ", indentation)
+       }
+       for i, s := range stringArray {
+               stringArray[i] = fmt.Sprintf("%s%s", padding, s)
+       }
+
+       return strings.Join(stringArray, "\n")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
new file mode 100644 (file)
index 0000000..1ff6104
--- /dev/null
@@ -0,0 +1,138 @@
+package stenographer
+
+import (
+       "sync"
+
+       "github.com/onsi/ginkgo/types"
+)
+
+func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
+       return FakeStenographerCall{
+               Method: method,
+               Args:   args,
+       }
+}
+
+type FakeStenographer struct {
+       calls []FakeStenographerCall
+       lock  *sync.Mutex
+}
+
+type FakeStenographerCall struct {
+       Method string
+       Args   []interface{}
+}
+
+func NewFakeStenographer() *FakeStenographer {
+       stenographer := &FakeStenographer{
+               lock: &sync.Mutex{},
+       }
+       stenographer.Reset()
+       return stenographer
+}
+
+func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
+       stenographer.lock.Lock()
+       defer stenographer.lock.Unlock()
+
+       return stenographer.calls
+}
+
+func (stenographer *FakeStenographer) Reset() {
+       stenographer.lock.Lock()
+       defer stenographer.lock.Unlock()
+
+       stenographer.calls = make([]FakeStenographerCall, 0)
+}
+
+func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
+       stenographer.lock.Lock()
+       defer stenographer.lock.Unlock()
+
+       results := make([]FakeStenographerCall, 0)
+       for _, call := range stenographer.calls {
+               if call.Method == method {
+                       results = append(results, call)
+               }
+       }
+
+       return results
+}
+
+func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
+       stenographer.lock.Lock()
+       defer stenographer.lock.Unlock()
+
+       stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
+}
+
+func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
+       stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
+}
+
+func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
+       stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
+}
+
+func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
+       stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct)
+}
+
+func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
+       stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
+}
+
+func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
+       stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
+}
+
+func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
+       stenographer.registerCall("AnnounceSpecWillRun", spec)
+}
+
+func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
+       stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
+}
+
+func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
+       stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
+}
+func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
+       stenographer.registerCall("AnnounceCapturedOutput", output)
+}
+
+func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
+       stenographer.registerCall("AnnounceSuccesfulSpec", spec)
+}
+
+func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
+       stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
+}
+
+func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
+       stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
+}
+
+func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
+       stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
+}
+
+func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
+}
+
+func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
+}
+
+func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
+}
+
+func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
+}
+
+func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
+       stenographer.registerCall("SummarizeFailures", summaries)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
new file mode 100644 (file)
index 0000000..5b5d905
--- /dev/null
@@ -0,0 +1,549 @@
+/*
+The stenographer is used by Ginkgo's reporters to generate output.
+
+Move along, nothing to see here.
+*/
+
+package stenographer
+
+import (
+       "fmt"
+       "runtime"
+       "strings"
+
+       "github.com/onsi/ginkgo/types"
+)
+
+const defaultStyle = "\x1b[0m"
+const boldStyle = "\x1b[1m"
+const redColor = "\x1b[91m"
+const greenColor = "\x1b[32m"
+const yellowColor = "\x1b[33m"
+const cyanColor = "\x1b[36m"
+const grayColor = "\x1b[90m"
+const lightGrayColor = "\x1b[37m"
+
+type cursorStateType int
+
+const (
+       cursorStateTop cursorStateType = iota
+       cursorStateStreaming
+       cursorStateMidBlock
+       cursorStateEndBlock
+)
+
+type Stenographer interface {
+       AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
+       AnnounceAggregatedParallelRun(nodes int, succinct bool)
+       AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool)
+       AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
+       AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
+
+       AnnounceSpecWillRun(spec *types.SpecSummary)
+       AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
+       AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
+
+       AnnounceCapturedOutput(output string)
+
+       AnnounceSuccesfulSpec(spec *types.SpecSummary)
+       AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
+       AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
+
+       AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
+       AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
+
+       AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
+       AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
+       AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
+
+       SummarizeFailures(summaries []*types.SpecSummary)
+}
+
+func New(color bool) Stenographer {
+       denoter := "•"
+       if runtime.GOOS == "windows" {
+               denoter = "+"
+       }
+       return &consoleStenographer{
+               color:       color,
+               denoter:     denoter,
+               cursorState: cursorStateTop,
+       }
+}
+
+type consoleStenographer struct {
+       color       bool
+       denoter     string
+       cursorState cursorStateType
+}
+
+var alternatingColors = []string{defaultStyle, grayColor}
+
+func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
+       if succinct {
+               s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
+               return
+       }
+       s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
+       s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
+       if randomizingAll {
+               s.print(0, " - Will randomize all specs")
+       }
+       s.printNewLine()
+}
+
+func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
+       if succinct {
+               s.print(0, "- node #%d ", node)
+               return
+       }
+       s.println(0,
+               "Parallel test node %s/%s. Assigned %s of %s specs.",
+               s.colorize(boldStyle, "%d", node),
+               s.colorize(boldStyle, "%d", nodes),
+               s.colorize(boldStyle, "%d", specsToRun),
+               s.colorize(boldStyle, "%d", totalSpecs),
+       )
+       s.printNewLine()
+}
+
+func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
+       if succinct {
+               s.print(0, "- %d nodes ", nodes)
+               return
+       }
+       s.println(0,
+               "Running in parallel across %s nodes",
+               s.colorize(boldStyle, "%d", nodes),
+       )
+       s.printNewLine()
+}
+
+func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
+       if succinct {
+               s.print(0, "- %d/%d specs ", specsToRun, total)
+               s.stream()
+               return
+       }
+       s.println(0,
+               "Will run %s of %s specs",
+               s.colorize(boldStyle, "%d", specsToRun),
+               s.colorize(boldStyle, "%d", total),
+       )
+
+       s.printNewLine()
+}
+
+func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
+       if succinct && summary.SuiteSucceeded {
+               s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
+               return
+       }
+       s.printNewLine()
+       color := greenColor
+       if !summary.SuiteSucceeded {
+               color = redColor
+       }
+       s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
+
+       status := ""
+       if summary.SuiteSucceeded {
+               status = s.colorize(boldStyle+greenColor, "SUCCESS!")
+       } else {
+               status = s.colorize(boldStyle+redColor, "FAIL!")
+       }
+
+       s.print(0,
+               "%s -- %s | %s | %s | %s ",
+               status,
+               s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
+               s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs),
+               s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
+               s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
+       )
+}
+
+func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
+       s.startBlock()
+       for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
+               s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
+       }
+
+       indentation := 0
+       if len(spec.ComponentTexts) > 2 {
+               indentation = 1
+               s.printNewLine()
+       }
+       index := len(spec.ComponentTexts) - 1
+       s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
+       s.printNewLine()
+       s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
+       s.printNewLine()
+       s.midBlock()
+}
+
+func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
+       s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
+}
+
+func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
+       s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
+}
+
+func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
+       s.startBlock()
+       var message string
+       switch summary.State {
+       case types.SpecStateFailed:
+               message = "Failure"
+       case types.SpecStatePanicked:
+               message = "Panic"
+       case types.SpecStateTimedOut:
+               message = "Timeout"
+       }
+
+       s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
+
+       indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
+
+       s.printNewLine()
+       s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
+
+       s.endBlock()
+}
+
+func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
+       if output == "" {
+               return
+       }
+
+       s.startBlock()
+       s.println(0, output)
+       s.midBlock()
+}
+
+func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
+       s.print(0, s.colorize(greenColor, s.denoter))
+       s.stream()
+}
+
+func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
+       s.printBlockWithMessage(
+               s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
+               "",
+               spec,
+               succinct,
+       )
+}
+
+func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
+       s.printBlockWithMessage(
+               s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
+               s.measurementReport(spec, succinct),
+               spec,
+               succinct,
+       )
+}
+
+func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
+       if noisy {
+               s.printBlockWithMessage(
+                       s.colorize(yellowColor, "P [PENDING]"),
+                       "",
+                       spec,
+                       false,
+               )
+       } else {
+               s.print(0, s.colorize(yellowColor, "P"))
+               s.stream()
+       }
+}
+
+func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       // Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
+       if succinct || spec.Failure == (types.SpecFailure{}) {
+               s.print(0, s.colorize(cyanColor, "S"))
+               s.stream()
+       } else {
+               s.startBlock()
+               s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
+
+               indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
+
+               s.printNewLine()
+               s.printSkip(indentation, spec.Failure)
+               s.endBlock()
+       }
+}
+
+func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
+}
+
+func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
+}
+
+func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
+}
+
+func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
+       failingSpecs := []*types.SpecSummary{}
+
+       for _, summary := range summaries {
+               if summary.HasFailureState() {
+                       failingSpecs = append(failingSpecs, summary)
+               }
+       }
+
+       if len(failingSpecs) == 0 {
+               return
+       }
+
+       s.printNewLine()
+       s.printNewLine()
+       plural := "s"
+       if len(failingSpecs) == 1 {
+               plural = ""
+       }
+       s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
+       for _, summary := range failingSpecs {
+               s.printNewLine()
+               if summary.HasFailureState() {
+                       if summary.TimedOut() {
+                               s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
+                       } else if summary.Panicked() {
+                               s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
+                       } else if summary.Failed() {
+                               s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
+                       }
+                       s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
+                       s.printNewLine()
+                       s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
+               }
+       }
+}
+
+func (s *consoleStenographer) startBlock() {
+       if s.cursorState == cursorStateStreaming {
+               s.printNewLine()
+               s.printDelimiter()
+       } else if s.cursorState == cursorStateMidBlock {
+               s.printNewLine()
+       }
+}
+
+func (s *consoleStenographer) midBlock() {
+       s.cursorState = cursorStateMidBlock
+}
+
+func (s *consoleStenographer) endBlock() {
+       s.printDelimiter()
+       s.cursorState = cursorStateEndBlock
+}
+
+func (s *consoleStenographer) stream() {
+       s.cursorState = cursorStateStreaming
+}
+
+func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
+       s.startBlock()
+       s.println(0, header)
+
+       indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
+
+       if message != "" {
+               s.printNewLine()
+               s.println(indentation, message)
+       }
+
+       s.endBlock()
+}
+
+func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
+       s.startBlock()
+       s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
+
+       indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
+
+       s.printNewLine()
+       s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
+       s.endBlock()
+}
+
+func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
+       switch failedComponentType {
+       case types.SpecComponentTypeBeforeSuite:
+               return " in Suite Setup (BeforeSuite)"
+       case types.SpecComponentTypeAfterSuite:
+               return " in Suite Teardown (AfterSuite)"
+       case types.SpecComponentTypeBeforeEach:
+               return " in Spec Setup (BeforeEach)"
+       case types.SpecComponentTypeJustBeforeEach:
+               return " in Spec Setup (JustBeforeEach)"
+       case types.SpecComponentTypeAfterEach:
+               return " in Spec Teardown (AfterEach)"
+       }
+
+       return ""
+}
+
+func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
+       s.println(indentation, s.colorize(cyanColor, spec.Message))
+       s.printNewLine()
+       s.println(indentation, spec.Location.String())
+}
+
+func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
+       if state == types.SpecStatePanicked {
+               s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
+               s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
+               s.println(indentation, failure.Location.String())
+               s.printNewLine()
+               s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
+               s.println(indentation, failure.Location.FullStackTrace)
+       } else {
+               s.println(indentation, s.colorize(redColor, failure.Message))
+               s.printNewLine()
+               s.println(indentation, failure.Location.String())
+               if fullTrace {
+                       s.printNewLine()
+                       s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
+                       s.println(indentation, failure.Location.FullStackTrace)
+               }
+       }
+}
+
+func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
+       startIndex := 1
+       indentation := 0
+
+       if len(componentTexts) == 1 {
+               startIndex = 0
+       }
+
+       for i := startIndex; i < len(componentTexts); i++ {
+               if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
+                       color := redColor
+                       if state == types.SpecStateSkipped {
+                               color = cyanColor
+                       }
+                       blockType := ""
+                       switch failedComponentType {
+                       case types.SpecComponentTypeBeforeSuite:
+                               blockType = "BeforeSuite"
+                       case types.SpecComponentTypeAfterSuite:
+                               blockType = "AfterSuite"
+                       case types.SpecComponentTypeBeforeEach:
+                               blockType = "BeforeEach"
+                       case types.SpecComponentTypeJustBeforeEach:
+                               blockType = "JustBeforeEach"
+                       case types.SpecComponentTypeAfterEach:
+                               blockType = "AfterEach"
+                       case types.SpecComponentTypeIt:
+                               blockType = "It"
+                       case types.SpecComponentTypeMeasure:
+                               blockType = "Measurement"
+                       }
+                       if succinct {
+                               s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
+                       } else {
+                               s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
+                               s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
+                       }
+               } else {
+                       if succinct {
+                               s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
+                       } else {
+                               s.println(indentation, componentTexts[i])
+                               s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
+                       }
+               }
+               indentation++
+       }
+
+       return indentation
+}
+
+func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
+       indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
+
+       if succinct {
+               if len(componentTexts) > 0 {
+                       s.printNewLine()
+                       s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
+               }
+               s.printNewLine()
+               indentation = 1
+       } else {
+               indentation--
+       }
+
+       return indentation
+}
+
+func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
+       orderedKeys := make([]string, len(measurements))
+       for key, measurement := range measurements {
+               orderedKeys[measurement.Order] = key
+       }
+       return orderedKeys
+}
+
+func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
+       if len(spec.Measurements) == 0 {
+               return "Found no measurements"
+       }
+
+       message := []string{}
+       orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
+
+       if succinct {
+               message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
+               for _, key := range orderedKeys {
+                       measurement := spec.Measurements[key]
+                       message = append(message, fmt.Sprintf("  %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
+                               s.colorize(boldStyle, "%s", measurement.Name),
+                               measurement.SmallestLabel,
+                               s.colorize(greenColor, "%.3f", measurement.Smallest),
+                               measurement.Units,
+                               measurement.AverageLabel,
+                               s.colorize(cyanColor, "%.3f", measurement.Average),
+                               measurement.Units,
+                               s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
+                               measurement.Units,
+                               measurement.LargestLabel,
+                               s.colorize(redColor, "%.3f", measurement.Largest),
+                               measurement.Units,
+                       ))
+               }
+       } else {
+               message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
+               for _, key := range orderedKeys {
+                       measurement := spec.Measurements[key]
+                       info := ""
+                       if measurement.Info != nil {
+                               message = append(message, fmt.Sprintf("%v", measurement.Info))
+                       }
+
+                       message = append(message, fmt.Sprintf("%s:\n%s  %s: %s%s\n  %s: %s%s\n  %s: %s%s ± %s%s",
+                               s.colorize(boldStyle, "%s", measurement.Name),
+                               info,
+                               measurement.SmallestLabel,
+                               s.colorize(greenColor, "%.3f", measurement.Smallest),
+                               measurement.Units,
+                               measurement.LargestLabel,
+                               s.colorize(redColor, "%.3f", measurement.Largest),
+                               measurement.Units,
+                               measurement.AverageLabel,
+                               s.colorize(cyanColor, "%.3f", measurement.Average),
+                               measurement.Units,
+                               s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
+                               measurement.Units,
+                       ))
+               }
+       }
+
+       return strings.Join(message, "\n")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
new file mode 100644 (file)
index 0000000..657dfe7
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+
+TeamCity Reporter for Ginkgo
+
+Makes use of TeamCity's support for Service Messages
+http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
+*/
+
+package reporters
+
+import (
+       "fmt"
+       "github.com/onsi/ginkgo/config"
+       "github.com/onsi/ginkgo/types"
+       "io"
+       "strings"
+)
+
+const (
+       messageId = "##teamcity"
+)
+
+type TeamCityReporter struct {
+       writer        io.Writer
+       testSuiteName string
+}
+
+func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
+       return &TeamCityReporter{
+               writer: writer,
+       }
+}
+
+func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+       reporter.testSuiteName = escape(summary.SuiteDescription)
+       fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName)
+}
+
+func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+       reporter.handleSetupSummary("BeforeSuite", setupSummary)
+}
+
+func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+       reporter.handleSetupSummary("AfterSuite", setupSummary)
+}
+
+func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
+       if setupSummary.State != types.SpecStatePassed {
+               testName := escape(name)
+               fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
+               message := escape(setupSummary.Failure.ComponentCodeLocation.String())
+               details := escape(setupSummary.Failure.Message)
+               fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
+               durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
+               fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
+       }
+}
+
+func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
+       testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
+       fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
+}
+
+func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+       testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
+
+       if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
+               message := escape(specSummary.Failure.ComponentCodeLocation.String())
+               details := escape(specSummary.Failure.Message)
+               fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
+       }
+       if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
+               fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName)
+       }
+
+       durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
+       fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
+}
+
+func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+       fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName)
+}
+
+func escape(output string) string {
+       output = strings.Replace(output, "|", "||", -1)
+       output = strings.Replace(output, "'", "|'", -1)
+       output = strings.Replace(output, "\n", "|n", -1)
+       output = strings.Replace(output, "\r", "|r", -1)
+       output = strings.Replace(output, "[", "|[", -1)
+       output = strings.Replace(output, "]", "|]", -1)
+       return output
+}
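To actually emit these service messages, the reporter is registered when the suite boots. A minimal sketch of a bootstrap file using ginkgo's RunSpecsWithCustomReporters (the package and suite names are placeholders):

    package mypackage_test

    import (
            "os"
            "testing"

            . "github.com/onsi/ginkgo"
            "github.com/onsi/ginkgo/reporters"
            . "github.com/onsi/gomega"
    )

    func TestMyPackage(t *testing.T) {
            RegisterFailHandler(Fail)
            // The TeamCity reporter writes ##teamcity[...] lines to stdout,
            // in addition to whatever the default reporter prints.
            RunSpecsWithCustomReporters(t, "MyPackage Suite", []Reporter{
                    reporters.NewTeamCityReporter(os.Stdout),
            })
    }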
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/types/code_location.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/types/code_location.go
new file mode 100644 (file)
index 0000000..935a89e
--- /dev/null
@@ -0,0 +1,15 @@
+package types
+
+import (
+       "fmt"
+)
+
+type CodeLocation struct {
+       FileName       string
+       LineNumber     int
+       FullStackTrace string
+}
+
+func (codeLocation CodeLocation) String() string {
+       return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/types/synchronization.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/types/synchronization.go
new file mode 100644 (file)
index 0000000..fdd6ed5
--- /dev/null
@@ -0,0 +1,30 @@
+package types
+
+import (
+       "encoding/json"
+)
+
+type RemoteBeforeSuiteState int
+
+const (
+       RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota
+
+       RemoteBeforeSuiteStatePending
+       RemoteBeforeSuiteStatePassed
+       RemoteBeforeSuiteStateFailed
+       RemoteBeforeSuiteStateDisappeared
+)
+
+type RemoteBeforeSuiteData struct {
+       Data  []byte
+       State RemoteBeforeSuiteState
+}
+
+func (r RemoteBeforeSuiteData) ToJSON() []byte {
+       data, _ := json.Marshal(r)
+       return data
+}
+
+type RemoteAfterSuiteData struct {
+       CanRun bool
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/types/types.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/types/types.go
new file mode 100644 (file)
index 0000000..889612e
--- /dev/null
@@ -0,0 +1,143 @@
+package types
+
+import "time"
+
+const GINKGO_FOCUS_EXIT_CODE = 197
+
+type SuiteSummary struct {
+       SuiteDescription string
+       SuiteSucceeded   bool
+       SuiteID          string
+
+       NumberOfSpecsBeforeParallelization int
+       NumberOfTotalSpecs                 int
+       NumberOfSpecsThatWillBeRun         int
+       NumberOfPendingSpecs               int
+       NumberOfSkippedSpecs               int
+       NumberOfPassedSpecs                int
+       NumberOfFailedSpecs                int
+       RunTime                            time.Duration
+}
+
+type SpecSummary struct {
+       ComponentTexts         []string
+       ComponentCodeLocations []CodeLocation
+
+       State           SpecState
+       RunTime         time.Duration
+       Failure         SpecFailure
+       IsMeasurement   bool
+       NumberOfSamples int
+       Measurements    map[string]*SpecMeasurement
+
+       CapturedOutput string
+       SuiteID        string
+}
+
+func (s SpecSummary) HasFailureState() bool {
+       return s.State.IsFailure()
+}
+
+func (s SpecSummary) TimedOut() bool {
+       return s.State == SpecStateTimedOut
+}
+
+func (s SpecSummary) Panicked() bool {
+       return s.State == SpecStatePanicked
+}
+
+func (s SpecSummary) Failed() bool {
+       return s.State == SpecStateFailed
+}
+
+func (s SpecSummary) Passed() bool {
+       return s.State == SpecStatePassed
+}
+
+func (s SpecSummary) Skipped() bool {
+       return s.State == SpecStateSkipped
+}
+
+func (s SpecSummary) Pending() bool {
+       return s.State == SpecStatePending
+}
+
+type SetupSummary struct {
+       ComponentType SpecComponentType
+       CodeLocation  CodeLocation
+
+       State   SpecState
+       RunTime time.Duration
+       Failure SpecFailure
+
+       CapturedOutput string
+       SuiteID        string
+}
+
+type SpecFailure struct {
+       Message        string
+       Location       CodeLocation
+       ForwardedPanic string
+
+       ComponentIndex        int
+       ComponentType         SpecComponentType
+       ComponentCodeLocation CodeLocation
+}
+
+type SpecMeasurement struct {
+       Name  string
+       Info  interface{}
+       Order int
+
+       Results []float64
+
+       Smallest     float64
+       Largest      float64
+       Average      float64
+       StdDeviation float64
+
+       SmallestLabel string
+       LargestLabel  string
+       AverageLabel  string
+       Units         string
+}
+
+type SpecState uint
+
+const (
+       SpecStateInvalid SpecState = iota
+
+       SpecStatePending
+       SpecStateSkipped
+       SpecStatePassed
+       SpecStateFailed
+       SpecStatePanicked
+       SpecStateTimedOut
+)
+
+func (state SpecState) IsFailure() bool {
+       return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
+}
+
+type SpecComponentType uint
+
+const (
+       SpecComponentTypeInvalid SpecComponentType = iota
+
+       SpecComponentTypeContainer
+       SpecComponentTypeBeforeSuite
+       SpecComponentTypeAfterSuite
+       SpecComponentTypeBeforeEach
+       SpecComponentTypeJustBeforeEach
+       SpecComponentTypeAfterEach
+       SpecComponentTypeIt
+       SpecComponentTypeMeasure
+)
+
+type FlagType uint
+
+const (
+       FlagTypeNone FlagType = iota
+       FlagTypeFocused
+       FlagTypePending
+)
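The helper methods on SpecSummary and SpecState are what reporters typically switch on. A fragment of a hypothetical custom reporter, purely to illustrate the intent (the countingReporter type and its counter fields are not part of this change):

    func (r *countingReporter) SpecDidComplete(summary *types.SpecSummary) {
            switch {
            case summary.Passed():
                    r.passed++
            case summary.HasFailureState(): // Failed, Panicked, or TimedOut
                    r.failed++
            case summary.Skipped(), summary.Pending():
                    r.notRun++
            }
    }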
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/.gitignore b/Godeps/_workspace/src/github.com/onsi/gomega/.gitignore
new file mode 100644 (file)
index 0000000..5514532
--- /dev/null
@@ -0,0 +1,3 @@
+.DS_Store
+*.test
+.
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/.travis.yml b/Godeps/_workspace/src/github.com/onsi/gomega/.travis.yml
new file mode 100644 (file)
index 0000000..79780ec
--- /dev/null
@@ -0,0 +1,11 @@
+language: go
+go:
+  - 1.4
+  - 1.5
+
+install:
+  - go get -v ./...
+  - go get github.com/onsi/ginkgo
+  - go install github.com/onsi/ginkgo/ginkgo
+
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/CHANGELOG.md b/Godeps/_workspace/src/github.com/onsi/gomega/CHANGELOG.md
new file mode 100644 (file)
index 0000000..0c5ede5
--- /dev/null
@@ -0,0 +1,70 @@
+## HEAD
+
+Improvements:
+
+- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks.  Can be paired with `Eventually` to safely send a value down a channel with a timeout.
+- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler.  This is always a mistake that can hide failing tests.
+- `Receive()` no longer errors when passed a closed channel; it's perfectly fine to attempt to read from a closed channel, so `Ω(c).Should(Receive())` always fails and `Ω(c).ShouldNot(Receive())` always passes with a closed channel.
+- Added `HavePrefix` and `HaveSuffix` matchers.
+- `ghttp` can now handle concurrent requests.
+- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
+- Improved `ghttp`'s behavior around failing assertions and panics:
+    - If a registered handler makes a failing assertion `ghttp` will return `500`.
+    - If a registered handler panics, `ghttp` will return `500` *and* fail the test.  This is new behavior that may cause existing code to break.  This code is almost certainly incorrect and creating a false positive.
+- `ghttp` servers can take an `io.Writer`.  `ghttp` will write a line to the writer when each request arrives.
+- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
+- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
+
+Bug Fixes:
+- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
+- `ContainElement` no longer bails if a passed-in matcher errors.
+
+## 1.0 (8/2/2014)
+
+No changes. Dropping "beta" from the version number.
+
+## 1.0.0-beta (7/8/2014)
+Breaking Changes:
+
+- Changed OmegaMatcher interface.  Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
+- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher.  Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher
+
+New Test-Support Features:
+
+- `ghttp`: supports testing http clients
+    - Provides a flexible fake http server
+    - Provides a collection of chainable http handlers that perform assertions.
+- `gbytes`: supports making ordered assertions against streams of data
+    - Provides a `gbytes.Buffer`
+    - Provides a `Say` matcher to perform ordered assertions against output data
+- `gexec`: supports testing external processes
+    - Provides support for building Go binaries
+    - Wraps and starts `exec.Cmd` commands
+    - Makes it easy to assert against stdout and stderr
+    - Makes it easy to send signals and wait for processes to exit
+    - Provides an `Exit` matcher to assert against exit code.
+
+DSL Changes:
+
+- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
+- The default timeouts for `Eventually` and `Consistently` are now configurable.
+
+New Matchers:
+
+- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
+- `BeTemporally`: like `BeNumerically` but for `time.Time`
+- `HaveKeyWithValue`: asserts a map has a given key with the given value.
+
+Updated Matchers:
+
+- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
+- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future.  For example, `Receive` returns `false` when a channel is closed.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Gomega's internal to `internal`
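Two of the matchers listed above, sketched as they would appear inside a test body (the channel, values, and thresholds are illustrative; a dot-import of gomega is assumed):

    ch := make(chan int)
    go func() { <-ch }() // a receiver that will eventually be ready

    // BeSent paired with Eventually retries the send until it succeeds or times out.
    Eventually(ch).Should(BeSent(42))

    // WithTransform converts the actual value before handing it to the inner matcher.
    Ω("hello").Should(WithTransform(func(s string) int { return len(s) }, BeNumerically(">", 3)))

    // The composite matchers combine existing ones.
    Ω(7).Should(And(BeNumerically(">", 0), Not(Equal(13))))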
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/LICENSE b/Godeps/_workspace/src/github.com/onsi/gomega/LICENSE
new file mode 100644 (file)
index 0000000..9415ee7
--- /dev/null
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/README.md b/Godeps/_workspace/src/github.com/onsi/gomega/README.md
new file mode 100644 (file)
index 0000000..c825591
--- /dev/null
@@ -0,0 +1,17 @@
+![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png)
+
+[![Build Status](https://travis-ci.org/onsi/gomega.png)](https://travis-ci.org/onsi/gomega)
+
+Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).
+
+To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
+
+## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang
+
+Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/)
+
+## License
+
+Gomega is MIT-Licensed
+
+The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution.  goraph has an MIT license.
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/format/format.go b/Godeps/_workspace/src/github.com/onsi/gomega/format/format.go
new file mode 100644 (file)
index 0000000..ec9c91a
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+Gomega's format package pretty-prints objects.  It explores input objects recursively and generates formatted, indented output with type information.
+*/
+package format
+
+import (
+       "fmt"
+       "reflect"
+       "strings"
+)
+
+// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
+var MaxDepth = uint(10)
+
+/*
+By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
+
+Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead.
+
+Note that GoString and String don't always have all the information you need to understand why a test failed!
+*/
+var UseStringerRepresentation = false
+
+//The default indentation string emitted by the format package
+var Indent = "    "
+
+var longFormThreshold = 20
+
+/*
+Generates a formatted matcher success/failure message of the form:
+
+       Expected
+               <pretty printed actual>
+       <message>
+               <pretty printed expected>
+
+If expected is omitted, the message looks like:
+
+       Expected
+               <pretty printed actual>
+       <message>
+*/
+func Message(actual interface{}, message string, expected ...interface{}) string {
+       if len(expected) == 0 {
+               return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
+       } else {
+               return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
+       }
+}
+
+/*
+Pretty prints the passed in object at the passed in indentation level.
+
+Object recurses into deeply nested objects emitting pretty-printed representations of their components.
+
+Modify format.MaxDepth to control how deep the recursion is allowed to go
+Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
+recursing into the object.
+*/
+func Object(object interface{}, indentation uint) string {
+       indent := strings.Repeat(Indent, int(indentation))
+       value := reflect.ValueOf(object)
+       return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
+}
+
+/*
+IndentString takes a string and indents each line by the specified amount.
+*/
+func IndentString(s string, indentation uint) string {
+       components := strings.Split(s, "\n")
+       result := ""
+       indent := strings.Repeat(Indent, int(indentation))
+       for i, component := range components {
+               result += indent + component
+               if i < len(components)-1 {
+                       result += "\n"
+               }
+       }
+
+       return result
+}
+
+func formatType(object interface{}) string {
+       t := reflect.TypeOf(object)
+       if t == nil {
+               return "nil"
+       }
+       switch t.Kind() {
+       case reflect.Chan:
+               v := reflect.ValueOf(object)
+               return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+       case reflect.Ptr:
+               return fmt.Sprintf("%T | %p", object, object)
+       case reflect.Slice:
+               v := reflect.ValueOf(object)
+               return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+       case reflect.Map:
+               v := reflect.ValueOf(object)
+               return fmt.Sprintf("%T | len:%d", object, v.Len())
+       default:
+               return fmt.Sprintf("%T", object)
+       }
+}
+
+func formatValue(value reflect.Value, indentation uint) string {
+       if indentation > MaxDepth {
+               return "..."
+       }
+
+       if isNilValue(value) {
+               return "nil"
+       }
+
+       if UseStringerRepresentation {
+               if value.CanInterface() {
+                       obj := value.Interface()
+                       switch x := obj.(type) {
+                       case fmt.GoStringer:
+                               return x.GoString()
+                       case fmt.Stringer:
+                               return x.String()
+                       }
+               }
+       }
+
+       switch value.Kind() {
+       case reflect.Bool:
+               return fmt.Sprintf("%v", value.Bool())
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return fmt.Sprintf("%v", value.Int())
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+               return fmt.Sprintf("%v", value.Uint())
+       case reflect.Uintptr:
+               return fmt.Sprintf("0x%x", value.Uint())
+       case reflect.Float32, reflect.Float64:
+               return fmt.Sprintf("%v", value.Float())
+       case reflect.Complex64, reflect.Complex128:
+               return fmt.Sprintf("%v", value.Complex())
+       case reflect.Chan:
+               return fmt.Sprintf("0x%x", value.Pointer())
+       case reflect.Func:
+               return fmt.Sprintf("0x%x", value.Pointer())
+       case reflect.Ptr:
+               return formatValue(value.Elem(), indentation)
+       case reflect.Slice:
+               if value.Type().Elem().Kind() == reflect.Uint8 {
+                       return formatString(value.Bytes(), indentation)
+               }
+               return formatSlice(value, indentation)
+       case reflect.String:
+               return formatString(value.String(), indentation)
+       case reflect.Array:
+               return formatSlice(value, indentation)
+       case reflect.Map:
+               return formatMap(value, indentation)
+       case reflect.Struct:
+               return formatStruct(value, indentation)
+       case reflect.Interface:
+               return formatValue(value.Elem(), indentation)
+       default:
+               if value.CanInterface() {
+                       return fmt.Sprintf("%#v", value.Interface())
+               } else {
+                       return fmt.Sprintf("%#v", value)
+               }
+       }
+}
+
+func formatString(object interface{}, indentation uint) string {
+       if indentation == 1 {
+               s := fmt.Sprintf("%s", object)
+               components := strings.Split(s, "\n")
+               result := ""
+               for i, component := range components {
+                       if i == 0 {
+                               result += component
+                       } else {
+                               result += Indent + component
+                       }
+                       if i < len(components)-1 {
+                               result += "\n"
+                       }
+               }
+
+               return fmt.Sprintf("%s", result)
+       } else {
+               return fmt.Sprintf("%q", object)
+       }
+}
+
+func formatSlice(v reflect.Value, indentation uint) string {
+       l := v.Len()
+       result := make([]string, l)
+       longest := 0
+       for i := 0; i < l; i++ {
+               result[i] = formatValue(v.Index(i), indentation+1)
+               if len(result[i]) > longest {
+                       longest = len(result[i])
+               }
+       }
+
+       if longest > longFormThreshold {
+               indenter := strings.Repeat(Indent, int(indentation))
+               return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+       } else {
+               return fmt.Sprintf("[%s]", strings.Join(result, ", "))
+       }
+}
+
+func formatMap(v reflect.Value, indentation uint) string {
+       l := v.Len()
+       result := make([]string, l)
+
+       longest := 0
+       for i, key := range v.MapKeys() {
+               value := v.MapIndex(key)
+               result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1))
+               if len(result[i]) > longest {
+                       longest = len(result[i])
+               }
+       }
+
+       if longest > longFormThreshold {
+               indenter := strings.Repeat(Indent, int(indentation))
+               return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+       } else {
+               return fmt.Sprintf("{%s}", strings.Join(result, ", "))
+       }
+}
+
+func formatStruct(v reflect.Value, indentation uint) string {
+       t := v.Type()
+
+       l := v.NumField()
+       result := []string{}
+       longest := 0
+       for i := 0; i < l; i++ {
+               structField := t.Field(i)
+               fieldEntry := v.Field(i)
+               representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1))
+               result = append(result, representation)
+               if len(representation) > longest {
+                       longest = len(representation)
+               }
+       }
+       if longest > longFormThreshold {
+               indenter := strings.Repeat(Indent, int(indentation))
+               return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
+       } else {
+               return fmt.Sprintf("{%s}", strings.Join(result, ", "))
+       }
+}
+
+func isNilValue(a reflect.Value) bool {
+       switch a.Kind() {
+       case reflect.Invalid:
+               return true
+       case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+               return a.IsNil()
+       }
+
+       return false
+}
+
+func isNil(a interface{}) bool {
+       if a == nil {
+               return true
+       }
+
+       switch reflect.TypeOf(a).Kind() {
+       case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+               return reflect.ValueOf(a).IsNil()
+       }
+
+       return false
+}
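The two exported entry points above are what custom matchers usually call. A small standalone sketch of their output (the value being formatted is arbitrary):

    package main

    import (
            "fmt"

            "github.com/onsi/gomega/format"
    )

    func main() {
            user := struct {
                    Name  string
                    Roles []string
            }{Name: "alice", Roles: []string{"admin", "ops"}}

            // Object prints "<type>: value", recursing into nested fields at one indent level.
            fmt.Println(format.Object(user, 1))

            // Message builds the standard "Expected / <actual> / <message> / <expected>" failure text.
            fmt.Println(format.Message(user, "to have role", "admin"))
    }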
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/gbytes/buffer.go b/Godeps/_workspace/src/github.com/onsi/gomega/gbytes/buffer.go
new file mode 100644 (file)
index 0000000..8775b86
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+Package gbytes provides a buffer that supports incrementally detecting input.
+
+You use gbytes.Buffer with the gbytes.Say matcher.  When Say finds a match, it fast-forwards the buffer's read cursor to the end of that match.
+
+Subsequent matches against the buffer will only operate against data that appears *after* the read cursor.
+
+The read cursor is an opaque implementation detail that you cannot access.  You should use the Say matcher to sift through the buffer.  You can always
+access the entire buffer's contents with Contents().
+
+*/
+package gbytes
+
+import (
+       "errors"
+       "fmt"
+       "io"
+       "regexp"
+       "sync"
+       "time"
+)
+
+/*
+gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher.
+
+You should only use a gbytes.Buffer in test code.  It stores all writes in an in-memory buffer - behavior that is inappropriate for production code!
+*/
+type Buffer struct {
+       contents     []byte
+       readCursor   uint64
+       lock         *sync.Mutex
+       detectCloser chan interface{}
+       closed       bool
+}
+
+/*
+NewBuffer returns a new gbytes.Buffer
+*/
+func NewBuffer() *Buffer {
+       return &Buffer{
+               lock: &sync.Mutex{},
+       }
+}
+
+/*
+BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes
+*/
+func BufferWithBytes(bytes []byte) *Buffer {
+       return &Buffer{
+               lock:     &sync.Mutex{},
+               contents: bytes,
+       }
+}
+
+/*
+Write implements the io.Writer interface
+*/
+func (b *Buffer) Write(p []byte) (n int, err error) {
+       b.lock.Lock()
+       defer b.lock.Unlock()
+
+       if b.closed {
+               return 0, errors.New("attempt to write to closed buffer")
+       }
+
+       b.contents = append(b.contents, p...)
+       return len(p), nil
+}
+
+/*
+Read implements the io.Reader interface. It advances the
+cursor as it reads.
+
+Returns an error if called after Close.
+*/
+func (b *Buffer) Read(d []byte) (int, error) {
+       b.lock.Lock()
+       defer b.lock.Unlock()
+
+       if b.closed {
+               return 0, errors.New("attempt to read from closed buffer")
+       }
+
+       if uint64(len(b.contents)) <= b.readCursor {
+               return 0, io.EOF
+       }
+
+       n := copy(d, b.contents[b.readCursor:])
+       b.readCursor += uint64(n)
+
+       return n, nil
+}
+
+/*
+Close signifies that the buffer will no longer be written to
+*/
+func (b *Buffer) Close() error {
+       b.lock.Lock()
+       defer b.lock.Unlock()
+
+       b.closed = true
+
+       return nil
+}
+
+/*
+Closed returns true if the buffer has been closed
+*/
+func (b *Buffer) Closed() bool {
+       b.lock.Lock()
+       defer b.lock.Unlock()
+
+       return b.closed
+}
+
+/*
+Contents returns all data ever written to the buffer.
+*/
+func (b *Buffer) Contents() []byte {
+       b.lock.Lock()
+       defer b.lock.Unlock()
+
+       contents := make([]byte, len(b.contents))
+       copy(contents, b.contents)
+       return contents
+}
+
+/*
+Detect takes a regular expression and returns a channel.
+
+The channel will receive true the first time data matching the regular expression is written to the buffer.
+The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region.
+
+You typically don't need to use Detect and should use the gbytes.Say matcher instead.  Detect is useful, however, in cases where your code must
+branch and handle different outputs written to the buffer.
+
+For example, consider a buffer hooked up to the stdout of a client library.  You may (or may not, depending on state outside of your control) need to authenticate the client library.
+
+You could do something like:
+
+select {
+case <-buffer.Detect("You are not logged in"):
+       //log in
+case <-buffer.Detect("Success"):
+       //carry on
+case <-time.After(time.Second):
+       //welp
+}
+buffer.CancelDetects()
+
+You should always call CancelDetects after using Detect.  This will close any channels that have not detected and clean up the goroutines that were spawned to support them.
+
+Finally, you can pass Detect a format string followed by variadic arguments.  This will construct the regexp using fmt.Sprintf.
+*/
+func (b *Buffer) Detect(desired string, args ...interface{}) chan bool {
+       formattedRegexp := desired
+       if len(args) > 0 {
+               formattedRegexp = fmt.Sprintf(desired, args...)
+       }
+       re := regexp.MustCompile(formattedRegexp)
+
+       b.lock.Lock()
+       defer b.lock.Unlock()
+
+       if b.detectCloser == nil {
+               b.detectCloser = make(chan interface{})
+       }
+
+       closer := b.detectCloser
+       response := make(chan bool)
+       go func() {
+               ticker := time.NewTicker(10 * time.Millisecond)
+               defer ticker.Stop()
+               defer close(response)
+               for {
+                       select {
+                       case <-ticker.C:
+                               b.lock.Lock()
+                               data, cursor := b.contents[b.readCursor:], b.readCursor
+                               loc := re.FindIndex(data)
+                               b.lock.Unlock()
+
+                               if loc != nil {
+                                       response <- true
+                                       b.lock.Lock()
+                                       newCursorPosition := cursor + uint64(loc[1])
+                                       if newCursorPosition >= b.readCursor {
+                                               b.readCursor = newCursorPosition
+                                       }
+                                       b.lock.Unlock()
+                                       return
+                               }
+                       case <-closer:
+                               return
+                       }
+               }
+       }()
+
+       return response
+}
+
+/*
+CancelDetects cancels any pending detects and cleans up their goroutines.  You should always call this when you're done with a set of Detect channels.
+*/
+func (b *Buffer) CancelDetects() {
+       b.lock.Lock()
+       defer b.lock.Unlock()
+
+       close(b.detectCloser)
+       b.detectCloser = nil
+}
+
+func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) {
+       b.lock.Lock()
+       defer b.lock.Unlock()
+
+       unreadBytes := b.contents[b.readCursor:]
+       copyOfUnreadBytes := make([]byte, len(unreadBytes))
+       copy(copyOfUnreadBytes, unreadBytes)
+
+       loc := re.FindIndex(unreadBytes)
+
+       if loc != nil {
+               b.readCursor += uint64(loc[1])
+               return true, copyOfUnreadBytes
+       } else {
+               return false, copyOfUnreadBytes
+       }
+}
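A small standalone sketch of the Detect/CancelDetects flow described in the comments above (the output lines and timeout are illustrative):

    package main

    import (
            "fmt"
            "time"

            "github.com/onsi/gomega/gbytes"
    )

    func main() {
            buf := gbytes.NewBuffer()
            go fmt.Fprint(buf, "booting...\nlistening on :8080\n")

            // Detect fires once unread data matches the regexp; the read cursor is then
            // fast-forwarded to just past the match.
            select {
            case <-buf.Detect(`listening on :(\d+)`):
                    fmt.Println("server is up")
            case <-time.After(time.Second):
                    fmt.Println("timed out waiting for output")
            }
            buf.CancelDetects() // always clean up the detect goroutines
    }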
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/gbytes/say_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/gbytes/say_matcher.go
new file mode 100644 (file)
index 0000000..ce5ebcb
--- /dev/null
@@ -0,0 +1,105 @@
+package gbytes
+
+import (
+       "fmt"
+       "regexp"
+
+       "github.com/onsi/gomega/format"
+)
+
+//Objects satisfying the BufferProvider can be used with the Say matcher.
+type BufferProvider interface {
+       Buffer() *Buffer
+}
+
+/*
+Say is a Gomega matcher that operates on gbytes.Buffers:
+
+       Ω(buffer).Should(Say("something"))
+
+will succeed if the unread portion of the buffer matches the regular expression "something".
+
+When Say succeeds, it fast-forwards the gbytes.Buffer's read cursor to just after the successful match.
+Thus, subsequent calls to Say will only match against the unread portion of the buffer.
+
+Say pairs very well with Eventually.  To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can:
+
+       Eventually(buffer, 3).Should(Say("[123]-star"))
+
+Ditto with Consistently.  To assert that a buffer does not receive data matching "never-see-this" for 1 second you can:
+
+       Consistently(buffer, 1).ShouldNot(Say("never-see-this"))
+
+In addition to gbytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface.
+In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer()
+
+If the buffer is closed, the Say matcher will tell Eventually to abort.
+*/
+func Say(expected string, args ...interface{}) *sayMatcher {
+       formattedRegexp := expected
+       if len(args) > 0 {
+               formattedRegexp = fmt.Sprintf(expected, args...)
+       }
+       return &sayMatcher{
+               re: regexp.MustCompile(formattedRegexp),
+       }
+}
+
+type sayMatcher struct {
+       re              *regexp.Regexp
+       receivedSayings []byte
+}
+
+func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) {
+       var buffer *Buffer
+
+       switch x := actual.(type) {
+       case *Buffer:
+               buffer = x
+       case BufferProvider:
+               buffer = x.Buffer()
+       default:
+               return nil, false
+       }
+
+       return buffer, true
+}
+
+func (m *sayMatcher) Match(actual interface{}) (success bool, err error) {
+       buffer, ok := m.buffer(actual)
+       if !ok {
+               return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       didSay, sayings := buffer.didSay(m.re)
+       m.receivedSayings = sayings
+
+       return didSay, nil
+}
+
+func (m *sayMatcher) FailureMessage(actual interface{}) (message string) {
+       return fmt.Sprintf(
+               "Got stuck at:\n%s\nWaiting for:\n%s",
+               format.IndentString(string(m.receivedSayings), 1),
+               format.IndentString(m.re.String(), 1),
+       )
+}
+
+func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return fmt.Sprintf(
+               "Saw:\n%s\nWhich matches the unexpected:\n%s",
+               format.IndentString(string(m.receivedSayings), 1),
+               format.IndentString(m.re.String(), 1),
+       )
+}
+
+func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+       switch x := actual.(type) {
+       case *Buffer:
+               return !x.Closed()
+       case BufferProvider:
+               return !x.Buffer().Closed()
+       default:
+               return true
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/gexec/build.go b/Godeps/_workspace/src/github.com/onsi/gomega/gexec/build.go
new file mode 100644 (file)
index 0000000..3e9bf9f
--- /dev/null
@@ -0,0 +1,78 @@
+package gexec
+
+import (
+       "errors"
+       "fmt"
+       "io/ioutil"
+       "os"
+       "os/exec"
+       "path"
+       "path/filepath"
+       "runtime"
+)
+
+var tmpDir string
+
+/*
+Build uses go build to compile the package at packagePath.  The resulting binary is saved off in a temporary directory.
+A path pointing to this binary is returned.
+
+Build uses the $GOPATH set in your environment.  It passes the variadic args on to `go build`.
+*/
+func Build(packagePath string, args ...string) (compiledPath string, err error) {
+       return BuildIn(os.Getenv("GOPATH"), packagePath, args...)
+}
+
+/*
+BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument).
+*/
+func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) {
+       tmpDir, err := temporaryDirectory()
+       if err != nil {
+               return "", err
+       }
+
+       if len(gopath) == 0 {
+               return "", errors.New("$GOPATH not provided when building " + packagePath)
+       }
+
+       executable := filepath.Join(tmpDir, path.Base(packagePath))
+       if runtime.GOOS == "windows" {
+               executable = executable + ".exe"
+       }
+
+       cmdArgs := append([]string{"build"}, args...)
+       cmdArgs = append(cmdArgs, "-o", executable, packagePath)
+
+       build := exec.Command("go", cmdArgs...)
+       build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...)
+
+       output, err := build.CombinedOutput()
+       if err != nil {
+               return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output))
+       }
+
+       return executable, nil
+}
+
+/*
+You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by
+gexec. In Ginkgo this is typically done in an AfterSuite callback.
+*/
+func CleanupBuildArtifacts() {
+       if tmpDir != "" {
+               os.RemoveAll(tmpDir)
+       }
+}
+
+func temporaryDirectory() (string, error) {
+       var err error
+       if tmpDir == "" {
+               tmpDir, err = ioutil.TempDir("", "gexec_artifacts")
+               if err != nil {
+                       return "", err
+               }
+       }
+
+       return ioutil.TempDir(tmpDir, "g")
+}
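The usual pattern is to build the binary once per suite and clean up afterwards. A sketch (the package path "github.com/me/myapp" and the test package name are placeholders):

    package myapp_test

    import (
            . "github.com/onsi/ginkgo"
            . "github.com/onsi/gomega"
            "github.com/onsi/gomega/gexec"
    )

    var pathToBinary string

    var _ = BeforeSuite(func() {
            var err error
            pathToBinary, err = gexec.Build("github.com/me/myapp") // compiled into a temp dir
            Ω(err).ShouldNot(HaveOccurred())
    })

    var _ = AfterSuite(func() {
            gexec.CleanupBuildArtifacts() // removes the temporary build directory
    })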
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/gexec/exit_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/gexec/exit_matcher.go
new file mode 100644 (file)
index 0000000..e6f4329
--- /dev/null
@@ -0,0 +1,88 @@
+package gexec
+
+import (
+       "fmt"
+
+       "github.com/onsi/gomega/format"
+)
+
+/*
+The Exit matcher operates on a session:
+
+       Ω(session).Should(Exit(<optional status code>))
+
+Exit passes if the session has already exited.
+
+If no status code is provided, then Exit will succeed if the session has exited regardless of exit code.
+Otherwise, Exit will only succeed if the process has exited with the provided status code.
+
+Note that the process must have already exited.  To wait for a process to exit, use Eventually:
+
+       Eventually(session, 3).Should(Exit(0))
+*/
+func Exit(optionalExitCode ...int) *exitMatcher {
+       exitCode := -1
+       if len(optionalExitCode) > 0 {
+               exitCode = optionalExitCode[0]
+       }
+
+       return &exitMatcher{
+               exitCode: exitCode,
+       }
+}
+
+type exitMatcher struct {
+       exitCode       int
+       didExit        bool
+       actualExitCode int
+}
+
+type Exiter interface {
+       ExitCode() int
+}
+
+func (m *exitMatcher) Match(actual interface{}) (success bool, err error) {
+       exiter, ok := actual.(Exiter)
+       if !ok {
+               return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1))
+       }
+
+       m.actualExitCode = exiter.ExitCode()
+
+       if m.actualExitCode == -1 {
+               return false, nil
+       }
+
+       if m.exitCode == -1 {
+               return true, nil
+       }
+       return m.exitCode == m.actualExitCode, nil
+}
+
+func (m *exitMatcher) FailureMessage(actual interface{}) (message string) {
+       if m.actualExitCode == -1 {
+               return "Expected process to exit.  It did not."
+       } else {
+               return format.Message(m.actualExitCode, "to match exit code:", m.exitCode)
+       }
+}
+
+func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       if m.actualExitCode == -1 {
+               return "you really shouldn't be able to see this!"
+       } else {
+               if m.exitCode == -1 {
+                       return "Expected process not to exit.  It did."
+               } else {
+                       return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode)
+               }
+       }
+}
+
+func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+       session, ok := actual.(*Session)
+       if ok {
+               return session.ExitCode() == -1
+       }
+       return true
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/gexec/prefixed_writer.go b/Godeps/_workspace/src/github.com/onsi/gomega/gexec/prefixed_writer.go
new file mode 100644 (file)
index 0000000..05e695a
--- /dev/null
@@ -0,0 +1,53 @@
+package gexec
+
+import (
+       "io"
+       "sync"
+)
+
+/*
+PrefixedWriter wraps an io.Writer, emitting the passed-in prefix at the beginning of each new line.
+This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each
+session by passing in a PrefixedWriter:
+
+gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter))
+*/
+type PrefixedWriter struct {
+       prefix        []byte
+       writer        io.Writer
+       lock          *sync.Mutex
+       atStartOfLine bool
+}
+
+func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter {
+       return &PrefixedWriter{
+               prefix:        []byte(prefix),
+               writer:        writer,
+               lock:          &sync.Mutex{},
+               atStartOfLine: true,
+       }
+}
+
+func (w *PrefixedWriter) Write(b []byte) (int, error) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       toWrite := []byte{}
+
+       for _, c := range b {
+               if w.atStartOfLine {
+                       toWrite = append(toWrite, w.prefix...)
+               }
+
+               toWrite = append(toWrite, c)
+
+               w.atStartOfLine = c == '\n'
+       }
+
+       _, err := w.writer.Write(toWrite)
+       if err != nil {
+               return 0, err
+       }
+
+       return len(b), nil
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/gexec/session.go b/Godeps/_workspace/src/github.com/onsi/gomega/gexec/session.go
new file mode 100644 (file)
index 0000000..46e7122
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+Package gexec provides support for testing external processes.
+*/
+package gexec
+
+import (
+       "io"
+       "os"
+       "os/exec"
+       "reflect"
+       "sync"
+       "syscall"
+
+       . "github.com/onsi/gomega"
+       "github.com/onsi/gomega/gbytes"
+)
+
+const INVALID_EXIT_CODE = 254
+
+type Session struct {
+       //The wrapped command
+       Command *exec.Cmd
+
+       //A *gbytes.Buffer connected to the command's stdout
+       Out *gbytes.Buffer
+
+       //A *gbytes.Buffer connected to the command's stderr
+       Err *gbytes.Buffer
+
+       //A channel that will close when the command exits
+       Exited <-chan struct{}
+
+       lock     *sync.Mutex
+       exitCode int
+}
+
+/*
+Start starts the passed-in *exec.Cmd command.  It wraps the command in a *gexec.Session.
+
+The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err.
+These buffers can be used with the gbytes.Say matcher to match against unread output:
+
+       Ω(session.Out).Should(gbytes.Say("foo-out"))
+       Ω(session.Err).Should(gbytes.Say("foo-err"))
+
+In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer.  This allows you to replace the first line, above, with:
+
+       Ω(session).Should(gbytes.Say("foo-out"))
+
+When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gbytes.Buffers and to the passed-in outWriter/errWriter.
+This is useful for capturing the process's output or logging it to screen.  In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter:
+
+       session, err := Start(command, GinkgoWriter, GinkgoWriter)
+
+This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails.
+
+The session wrapper is responsible for waiting on the *exec.Cmd command.  You *should not* call command.Wait() yourself.
+Instead, to assert that the command has exited you can use the gexec.Exit matcher:
+
+       Ω(session).Should(gexec.Exit())
+
+When the session exits it closes the stdout and stderr gbytes buffers.  This will short-circuit any
+Eventuallys waiting for the buffers to Say something.
+*/
+func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) {
+       exited := make(chan struct{})
+
+       session := &Session{
+               Command:  command,
+               Out:      gbytes.NewBuffer(),
+               Err:      gbytes.NewBuffer(),
+               Exited:   exited,
+               lock:     &sync.Mutex{},
+               exitCode: -1,
+       }
+
+       var commandOut, commandErr io.Writer
+
+       commandOut, commandErr = session.Out, session.Err
+
+       if outWriter != nil && !reflect.ValueOf(outWriter).IsNil() {
+               commandOut = io.MultiWriter(commandOut, outWriter)
+       }
+
+       if errWriter != nil && !reflect.ValueOf(errWriter).IsNil() {
+               commandErr = io.MultiWriter(commandErr, errWriter)
+       }
+
+       command.Stdout = commandOut
+       command.Stderr = commandErr
+
+       err := command.Start()
+       if err == nil {
+               go session.monitorForExit(exited)
+       }
+
+       return session, err
+}
+
+/*
+Buffer implements the gbytes.BufferProvider interface and returns s.Out
+This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out:
+
+       Eventually(session).Should(gbytes.Say("foo"))
+*/
+func (s *Session) Buffer() *gbytes.Buffer {
+       return s.Out
+}
+
+/*
+ExitCode returns the wrapped command's exit code.  If the command hasn't exited yet, ExitCode returns -1.
+
+To assert that the command has exited it is more convenient to use the Exit matcher:
+
+       Eventually(s).Should(gexec.Exit())
+
+When the process exits because it has received a particular signal, the exit code will be 128+signal-value
+(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html)
+
+*/
+func (s *Session) ExitCode() int {
+       s.lock.Lock()
+       defer s.lock.Unlock()
+       return s.exitCode
+}
+
+/*
+Wait waits until the wrapped command exits.  It can be passed an optional timeout.
+If the command does not exit within the timeout, Wait will trigger a test failure.
+
+Wait returns the session, making it possible to chain:
+
+       session.Wait().Out.Contents()
+
+will wait for the command to exit then return the entirety of Out's contents.
+
+Wait uses Eventually under the hood and accepts the same timeout/polling intervals that Eventually does.
+*/
+func (s *Session) Wait(timeout ...interface{}) *Session {
+       EventuallyWithOffset(1, s, timeout...).Should(Exit())
+       return s
+}
+
+/*
+Kill sends the running command a SIGKILL signal.  It does not wait for the process to exit.
+
+If the command has already exited, Kill returns silently.
+
+The session is returned to enable chaining.
+*/
+func (s *Session) Kill() *Session {
+       if s.ExitCode() != -1 {
+               return s
+       }
+       s.Command.Process.Kill()
+       return s
+}
+
+/*
+Interrupt sends the running command a SIGINT signal.  It does not wait for the process to exit.
+
+If the command has already exited, Interrupt returns silently.
+
+The session is returned to enable chaining.
+*/
+func (s *Session) Interrupt() *Session {
+       return s.Signal(syscall.SIGINT)
+}
+
+/*
+Terminate sends the running command a SIGTERM signal.  It does not wait for the process to exit.
+
+If the command has already exited, Terminate returns silently.
+
+The session is returned to enable chaining.
+*/
+func (s *Session) Terminate() *Session {
+       return s.Signal(syscall.SIGTERM)
+}
+
+/*
+Signal sends the running command the passed-in signal.  It does not wait for the process to exit.
+
+If the command has already exited, Signal returns silently.
+
+The session is returned to enable chaining.
+*/
+func (s *Session) Signal(signal os.Signal) *Session {
+       if s.ExitCode() != -1 {
+               return s
+       }
+       s.Command.Process.Signal(signal)
+       return s
+}
+
+func (s *Session) monitorForExit(exited chan<- struct{}) {
+       err := s.Command.Wait()
+       s.lock.Lock()
+       s.Out.Close()
+       s.Err.Close()
+       status := s.Command.ProcessState.Sys().(syscall.WaitStatus)
+       if status.Signaled() {
+               s.exitCode = 128 + int(status.Signal())
+       } else {
+               exitStatus := status.ExitStatus()
+               if exitStatus == -1 && err != nil {
+               if exitStatus == -1 && err != nil {
+                       s.exitCode = INVALID_EXIT_CODE
+               } else {
+                       s.exitCode = exitStatus
+               }
+       s.lock.Unlock()
+
+       close(exited)
+}
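Putting the session, gbytes, and the Exit matcher together, a sketch of a typical spec (it reuses the hypothetical pathToBinary from the build.go sketch above; the flag and the log line are placeholders):

    package myapp_test

    import (
            "os/exec"

            . "github.com/onsi/ginkgo"
            . "github.com/onsi/gomega"
            "github.com/onsi/gomega/gbytes"
            "github.com/onsi/gomega/gexec"
    )

    var _ = Describe("the compiled binary", func() {
            It("starts, logs, and shuts down on SIGINT", func() {
                    command := exec.Command(pathToBinary, "--port", "8080")
                    session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
                    Ω(err).ShouldNot(HaveOccurred())

                    // Say matches against unread stdout; Eventually polls until it does.
                    Eventually(session).Should(gbytes.Say("listening on :8080"))

                    session.Interrupt()
                    Eventually(session, "3s").Should(gexec.Exit())
            })
    })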
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/handlers.go b/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/handlers.go
new file mode 100644 (file)
index 0000000..63ff691
--- /dev/null
@@ -0,0 +1,313 @@
+package ghttp
+
+import (
+       "encoding/base64"
+       "encoding/json"
+       "fmt"
+       "io/ioutil"
+       "net/http"
+       "net/url"
+       "reflect"
+
+       "github.com/golang/protobuf/proto"
+       . "github.com/onsi/gomega"
+       "github.com/onsi/gomega/types"
+)
+
+//CombineHandler takes variadic list of handlers and produces one handler
+//that calls each handler in order.
+func CombineHandlers(handlers ...http.HandlerFunc) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               for _, handler := range handlers {
+                       handler(w, req)
+               }
+       }
+}
+
+//VerifyRequest returns a handler that verifies that a request uses the specified method to connect to the specified path
+//You may also pass in an optional rawQuery string which is tested against the request's `req.URL.RawQuery`
+//
+//For path, you may pass in a string, in which case strict equality will be applied
+//Alternatively you can pass in a matcher (ContainSubstring("/foo") and MatchRegexp("/foo/[a-f0-9]+") for example)
+func VerifyRequest(method string, path interface{}, rawQuery ...string) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               Ω(req.Method).Should(Equal(method), "Method mismatch")
+               switch p := path.(type) {
+               case types.GomegaMatcher:
+                       Ω(req.URL.Path).Should(p, "Path mismatch")
+               default:
+                       Ω(req.URL.Path).Should(Equal(path), "Path mismatch")
+               }
+               if len(rawQuery) > 0 {
+                       values, err := url.ParseQuery(rawQuery[0])
+                       Ω(err).ShouldNot(HaveOccurred(), "Expected RawQuery is malformed")
+
+                       Ω(req.URL.Query()).Should(Equal(values), "RawQuery mismatch")
+               }
+       }
+}
+
+//VerifyContentType returns a handler that verifies that a request has a Content-Type header set to the
+//specified value
+func VerifyContentType(contentType string) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               Ω(req.Header.Get("Content-Type")).Should(Equal(contentType))
+       }
+}
+
+//VerifyBasicAuth returns a handler that verifies the request contains a BasicAuth Authorization header
+//matching the passed in username and password
+func VerifyBasicAuth(username string, password string) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               auth := req.Header.Get("Authorization")
+               Ω(auth).ShouldNot(Equal(""), "Authorization header must be specified")
+
+               decoded, err := base64.StdEncoding.DecodeString(auth[6:])
+               Ω(err).ShouldNot(HaveOccurred())
+
+               Ω(string(decoded)).Should(Equal(fmt.Sprintf("%s:%s", username, password)), "Authorization mismatch")
+       }
+}
+
+//VerifyHeader returns a handler that verifies the request contains the passed in headers.
+//The passed in header keys are first canonicalized via http.CanonicalHeaderKey.
+//
+//The request must contain *all* the passed in headers, but it is allowed to have additional headers
+//beyond the passed in set.
+func VerifyHeader(header http.Header) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               for key, values := range header {
+                       key = http.CanonicalHeaderKey(key)
+                       Ω(req.Header[key]).Should(Equal(values), "Header mismatch for key: %s", key)
+               }
+       }
+}
+
+//VerifyHeaderKV returns a handler that verifies the request contains a header matching the passed in key and values
+//(recall that a `http.Header` is a mapping from string (key) to []string (values))
+//It is a convenience wrapper around `VerifyHeader` that allows you to avoid having to create an `http.Header` object.
+func VerifyHeaderKV(key string, values ...string) http.HandlerFunc {
+       return VerifyHeader(http.Header{key: values})
+}
+
+//VerifyBody returns a handler that verifies that the body of the request matches the passed in byte array.
+//It does this using Equal().
+func VerifyBody(expectedBody []byte) http.HandlerFunc {
+       return CombineHandlers(
+               func(w http.ResponseWriter, req *http.Request) {
+                       body, err := ioutil.ReadAll(req.Body)
+                       req.Body.Close()
+                       Ω(err).ShouldNot(HaveOccurred())
+                       Ω(body).Should(Equal(expectedBody), "Body Mismatch")
+               },
+       )
+}
+
+//VerifyJSON returns a handler that verifies that the body of the request is a valid JSON representation
+//matching the passed in JSON string.  It does this using Gomega's MatchJSON method
+//
+//VerifyJSON also verifies that the request's content type is application/json
+func VerifyJSON(expectedJSON string) http.HandlerFunc {
+       return CombineHandlers(
+               VerifyContentType("application/json"),
+               func(w http.ResponseWriter, req *http.Request) {
+                       body, err := ioutil.ReadAll(req.Body)
+                       req.Body.Close()
+                       Ω(err).ShouldNot(HaveOccurred())
+                       Ω(body).Should(MatchJSON(expectedJSON), "JSON Mismatch")
+               },
+       )
+}
+
+//VerifyJSONRepresenting is similar to VerifyJSON.  Instead of taking a JSON string, however, it
+//takes an arbitrary JSON-encodable object and verifies that the request's body is a JSON representation
+//that matches the object
+func VerifyJSONRepresenting(object interface{}) http.HandlerFunc {
+       data, err := json.Marshal(object)
+       Ω(err).ShouldNot(HaveOccurred())
+       return CombineHandlers(
+               VerifyContentType("application/json"),
+               VerifyJSON(string(data)),
+       )
+}
+
+//VerifyForm returns a handler that verifies a request contains the specified form values.
+//
+//The request must contain *all* of the specified values, but it is allowed to have additional
+//form values beyond the passed in set.
+func VerifyForm(values url.Values) http.HandlerFunc {
+       return func(w http.ResponseWriter, r *http.Request) {
+               err := r.ParseForm()
+               Ω(err).ShouldNot(HaveOccurred())
+               for key, vals := range values {
+                       Ω(r.Form[key]).Should(Equal(vals), "Form mismatch for key: %s", key)
+               }
+       }
+}
+
+//VerifyFormKV returns a handler that verifies a request contains a form key with the specified values.
+//
+//It is a convenience wrapper around `VerifyForm` that lets you avoid having to create a `url.Values` object.
+func VerifyFormKV(key string, values ...string) http.HandlerFunc {
+       return VerifyForm(url.Values{key: values})
+}
+
+//VerifyProtoRepresenting returns a handler that verifies that the body of the request is a valid protobuf
+//representation of the passed message.
+//
+//VerifyProtoRepresenting also verifies that the request's content type is application/x-protobuf
+func VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc {
+       return CombineHandlers(
+               VerifyContentType("application/x-protobuf"),
+               func(w http.ResponseWriter, req *http.Request) {
+                       body, err := ioutil.ReadAll(req.Body)
+                       Ω(err).ShouldNot(HaveOccurred())
+                       req.Body.Close()
+
+                       expectedType := reflect.TypeOf(expected)
+                       actualValuePtr := reflect.New(expectedType.Elem())
+
+                       actual, ok := actualValuePtr.Interface().(proto.Message)
+                       Ω(ok).Should(BeTrue(), "Message value is not a proto.Message")
+
+                       err = proto.Unmarshal(body, actual)
+                       Ω(err).ShouldNot(HaveOccurred(), "Failed to unmarshal protobuf")
+
+                       Ω(actual).Should(Equal(expected), "ProtoBuf Mismatch")
+               },
+       )
+}
+
+func copyHeader(src http.Header, dst http.Header) {
+       for key, value := range src {
+               dst[key] = value
+       }
+}
+
+/*
+RespondWith returns a handler that responds to a request with the specified status code and body
+
+Body may be a string or []byte
+
+Also, RespondWith can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+*/
+func RespondWith(statusCode int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               if len(optionalHeader) == 1 {
+                       copyHeader(optionalHeader[0], w.Header())
+               }
+               w.WriteHeader(statusCode)
+               switch x := body.(type) {
+               case string:
+                       w.Write([]byte(x))
+               case []byte:
+                       w.Write(x)
+               default:
+                       Ω(body).Should(BeNil(), "Invalid type for body.  Should be string or []byte.")
+               }
+       }
+}
+
+/*
+RespondWithPtr returns a handler that responds to a request with the specified status code and body
+
+Unlike RespondWith, you pass RespondWithPtr a pointer to the status code and body, allowing different tests
+to share the same setup but specify different status codes and bodies.
+
+Also, RespondWithPtr can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
+*/
+func RespondWithPtr(statusCode *int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               if len(optionalHeader) == 1 {
+                       copyHeader(optionalHeader[0], w.Header())
+               }
+               w.WriteHeader(*statusCode)
+               if body != nil {
+                       switch x := (body).(type) {
+                       case *string:
+                               w.Write([]byte(*x))
+                       case *[]byte:
+                               w.Write(*x)
+                       default:
+                               Ω(body).Should(BeNil(), "Invalid type for body.  Should be *string or *[]byte.")
+                       }
+               }
+       }
+}
+
+/*
+RespondWithJSONEncoded returns a handler that responds to a request with the specified status code and a body
+containing the JSON-encoding of the passed in object
+
+Also, RespondWithJSONEncoded can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+*/
+func RespondWithJSONEncoded(statusCode int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+       data, err := json.Marshal(object)
+       Ω(err).ShouldNot(HaveOccurred())
+
+       var headers http.Header
+       if len(optionalHeader) == 1 {
+               headers = optionalHeader[0]
+       } else {
+               headers = make(http.Header)
+       }
+       if _, found := headers["Content-Type"]; !found {
+               headers["Content-Type"] = []string{"application/json"}
+       }
+       return RespondWith(statusCode, string(data), headers)
+}
+
+/*
+RespondWithJSONEncodedPtr behaves like RespondWithJSONEncoded but takes a pointer
+to a status code and object.
+
+This allows different tests to share the same setup but specify different status codes and JSON-encoded
+objects.
+
+Also, RespondWithJSONEncodedPtr can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
+*/
+func RespondWithJSONEncodedPtr(statusCode *int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               data, err := json.Marshal(object)
+               Ω(err).ShouldNot(HaveOccurred())
+               var headers http.Header
+               if len(optionalHeader) == 1 {
+                       headers = optionalHeader[0]
+               } else {
+                       headers = make(http.Header)
+               }
+               if _, found := headers["Content-Type"]; !found {
+                       headers["Content-Type"] = []string{"application/json"}
+               }
+               copyHeader(headers, w.Header())
+               w.WriteHeader(*statusCode)
+               w.Write(data)
+       }
+}
+
+//RespondWithProto returns a handler that responds to a request with the specified status code and a body
+//containing the protobuf serialization of the provided message.
+//
+//Also, RespondWithProto can be given an optional http.Header.  The headers defined therein will be added to the response headers.
+func RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc {
+       return func(w http.ResponseWriter, req *http.Request) {
+               data, err := proto.Marshal(message)
+               Ω(err).ShouldNot(HaveOccurred())
+
+               var headers http.Header
+               if len(optionalHeader) == 1 {
+                       headers = optionalHeader[0]
+               } else {
+                       headers = make(http.Header)
+               }
+               if _, found := headers["Content-Type"]; !found {
+                       headers["Content-Type"] = []string{"application/x-protobuf"}
+               }
+               copyHeader(headers, w.Header())
+
+               w.WriteHeader(statusCode)
+               w.Write(data)
+       }
+}
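
As a usage sketch (not part of the vendored files; the /widgets route, X-Api-Key header, and payload are invented), the verification and response handlers above compose with CombineHandlers like so:

package widgets_test

import (
	"net/http"
	"strings"
	"testing"

	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/ghttp"
)

func TestCreateWidget(t *testing.T) {
	RegisterTestingT(t)

	server := ghttp.NewServer()
	defer server.Close()

	// The first request must be a POST to /widgets carrying the expected header and JSON body.
	server.AppendHandlers(ghttp.CombineHandlers(
		ghttp.VerifyRequest("POST", "/widgets"),
		ghttp.VerifyHeaderKV("X-Api-Key", "secret"),
		ghttp.VerifyJSON(`{"name": "sprocket"}`),
		ghttp.RespondWithJSONEncoded(http.StatusCreated, map[string]string{"id": "42"}),
	))

	req, err := http.NewRequest("POST", server.URL()+"/widgets", strings.NewReader(`{"name": "sprocket"}`))
	Ω(err).ShouldNot(HaveOccurred())
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Api-Key", "secret")

	resp, err := http.DefaultClient.Do(req)
	Ω(err).ShouldNot(HaveOccurred())
	Ω(resp.StatusCode).Should(Equal(http.StatusCreated))
}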
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/protobuf.go b/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/protobuf.go
new file mode 100644 (file)
index 0000000..b2972bc
--- /dev/null
@@ -0,0 +1,3 @@
+package protobuf
+
+//go:generate protoc --go_out=. simple_message.proto
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go b/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go
new file mode 100644 (file)
index 0000000..c55a484
--- /dev/null
@@ -0,0 +1,55 @@
+// Code generated by protoc-gen-go.
+// source: simple_message.proto
+// DO NOT EDIT!
+
+/*
+Package protobuf is a generated protocol buffer package.
+
+It is generated from these files:
+       simple_message.proto
+
+It has these top-level messages:
+       SimpleMessage
+*/
+package protobuf
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type SimpleMessage struct {
+       Description      *string `protobuf:"bytes,1,req,name=description" json:"description,omitempty"`
+       Id               *int32  `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+       Metadata         *string `protobuf:"bytes,3,opt,name=metadata" json:"metadata,omitempty"`
+       XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *SimpleMessage) Reset()         { *m = SimpleMessage{} }
+func (m *SimpleMessage) String() string { return proto.CompactTextString(m) }
+func (*SimpleMessage) ProtoMessage()    {}
+
+func (m *SimpleMessage) GetDescription() string {
+       if m != nil && m.Description != nil {
+               return *m.Description
+       }
+       return ""
+}
+
+func (m *SimpleMessage) GetId() int32 {
+       if m != nil && m.Id != nil {
+               return *m.Id
+       }
+       return 0
+}
+
+func (m *SimpleMessage) GetMetadata() string {
+       if m != nil && m.Metadata != nil {
+               return *m.Metadata
+       }
+       return ""
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto b/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto
new file mode 100644 (file)
index 0000000..35b7145
--- /dev/null
@@ -0,0 +1,9 @@
+syntax = "proto2";
+
+package protobuf;
+
+message SimpleMessage {
+    required string description = 1;
+    required int32 id = 2;
+    optional string metadata = 3;
+}
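
A hedged sketch (not part of the vendored files; the /simple route and field values are invented) of how the generated SimpleMessage pairs with VerifyProtoRepresenting:

package protobuf_test

import (
	"bytes"
	"net/http"
	"testing"

	proto "github.com/golang/protobuf/proto"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/ghttp"
	"github.com/onsi/gomega/ghttp/protobuf"
)

func TestVerifyProtoRepresenting(t *testing.T) {
	RegisterTestingT(t)

	expected := &protobuf.SimpleMessage{
		Description: proto.String("a for apple"),
		Id:          proto.Int32(2),
	}

	server := ghttp.NewServer()
	defer server.Close()
	server.AppendHandlers(ghttp.CombineHandlers(
		ghttp.VerifyRequest("POST", "/simple"),
		ghttp.VerifyProtoRepresenting(expected),
		ghttp.RespondWith(http.StatusOK, nil),
	))

	// The client under test would normally produce this body; here we marshal it directly.
	body, err := proto.Marshal(expected)
	Ω(err).ShouldNot(HaveOccurred())
	_, err = http.Post(server.URL()+"/simple", "application/x-protobuf", bytes.NewReader(body))
	Ω(err).ShouldNot(HaveOccurred())
	Ω(server.ReceivedRequests()).Should(HaveLen(1))
}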
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/test_server.go b/Godeps/_workspace/src/github.com/onsi/gomega/ghttp/test_server.go
new file mode 100644 (file)
index 0000000..fde65be
--- /dev/null
@@ -0,0 +1,368 @@
+/*
+Package ghttp supports testing HTTP clients by providing a test server (simply a thin wrapper around httptest's server) that supports
+registering multiple handlers.  Incoming requests are not routed between the different handlers
+- rather it is merely the order of the handlers that matters.  The first request is handled by the first
+registered handler, the second request by the second handler, etc.
+
+The intent here is to have each handler *verify* that the incoming request is valid.  To accomplish this, ghttp
+also provides a collection of bite-size handlers that each perform one aspect of request verification.  These can
+be composed together and registered with a ghttp server.  The result is an expressive language for describing
+the requests generated by the client under test.
+
+Here's a simple example, note that the server handler is only defined in one BeforeEach and then modified, as required, by the nested BeforeEaches.
+A more comprehensive example is available at https://onsi.github.io/gomega/#_testing_http_clients
+
+       var _ = Describe("A Sprockets Client", func() {
+               var server *ghttp.Server
+               var client *SprocketClient
+               BeforeEach(func() {
+                       server = ghttp.NewServer()
+                       client = NewSprocketClient(server.URL(), "skywalker", "tk427")
+               })
+
+               AfterEach(func() {
+                       server.Close()
+               })
+
+               Describe("fetching sprockets", func() {
+                       var statusCode int
+                       var sprockets []Sprocket
+                       BeforeEach(func() {
+                               statusCode = http.StatusOK
+                               sprockets = []Sprocket{}
+                               server.AppendHandlers(ghttp.CombineHandlers(
+                                       ghttp.VerifyRequest("GET", "/sprockets"),
+                                       ghttp.VerifyBasicAuth("skywalker", "tk427"),
+                                       ghttp.RespondWithJSONEncodedPtr(&statusCode, &sprockets),
+                               ))
+                       })
+
+                       Context("when requesting all sprockets", func() {
+                               Context("when the response is successful", func() {
+                                       BeforeEach(func() {
+                                               sprockets = []Sprocket{
+                                                       NewSprocket("Alfalfa"),
+                                                       NewSprocket("Banana"),
+                                               }
+                                       })
+
+                                       It("should return the returned sprockets", func() {
+                                               Ω(client.Sprockets()).Should(Equal(sprockets))
+                                       })
+                               })
+
+                               Context("when the response is missing", func() {
+                                       BeforeEach(func() {
+                                               statusCode = http.StatusNotFound
+                                       })
+
+                                       It("should return an empty list of sprockets", func() {
+                                               Ω(client.Sprockets()).Should(BeEmpty())
+                                       })
+                               })
+
+                               Context("when the response fails to authenticate", func() {
+                                       BeforeEach(func() {
+                                               statusCode = http.StatusUnauthorized
+                                       })
+
+                                       It("should return an AuthenticationError error", func() {
+                                               sprockets, err := client.Sprockets()
+                                               Ω(sprockets).Should(BeEmpty())
+                                               Ω(err).Should(MatchError(AuthenticationError))
+                                       })
+                               })
+
+                               Context("when the response is a server failure", func() {
+                                       BeforeEach(func() {
+                                               statusCode = http.StatusInternalServerError
+                                       })
+
+                                       It("should return an InternalError error", func() {
+                                               sprockets, err := client.Sprockets()
+                                               Ω(sprockets).Should(BeEmpty())
+                                               Ω(err).Should(MatchError(InternalError))
+                                       })
+                               })
+                       })
+
+                       Context("when requesting some sprockets", func() {
+                               BeforeEach(func() {
+                                       sprockets = []Sprocket{
+                                               NewSprocket("Alfalfa"),
+                                               NewSprocket("Banana"),
+                                       }
+
+                                       server.WrapHandler(0, ghttp.VerifyRequest("GET", "/sprockets", "filter=FOOD"))
+                               })
+
+                               It("should make the request with a filter", func() {
+                                       Ω(client.Sprockets("food")).Should(Equal(sprockets))
+                               })
+                       })
+               })
+       })
+*/
+package ghttp
+
+import (
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "net/http/httptest"
+       "reflect"
+       "regexp"
+       "strings"
+       "sync"
+
+       . "github.com/onsi/gomega"
+)
+
+func new() *Server {
+       return &Server{
+               AllowUnhandledRequests:     false,
+               UnhandledRequestStatusCode: http.StatusInternalServerError,
+               writeLock:                  &sync.Mutex{},
+       }
+}
+
+type routedHandler struct {
+       method     string
+       pathRegexp *regexp.Regexp
+       path       string
+       handler    http.HandlerFunc
+}
+
+// NewServer returns a new `*ghttp.Server` that wraps an `httptest` server.  The server is started automatically.
+func NewServer() *Server {
+       s := new()
+       s.HTTPTestServer = httptest.NewServer(s)
+       return s
+}
+
+// NewUnstartedServer returns a new, unstarted, `*ghttp.Server`.  Useful for specifying a custom listener on `server.HTTPTestServer`.
+func NewUnstartedServer() *Server {
+       s := new()
+       s.HTTPTestServer = httptest.NewUnstartedServer(s)
+       return s
+}
+
+// NewTLSServer returns a new `*ghttp.Server` that wraps an `httptest` TLS server.  The server is started automatically.
+func NewTLSServer() *Server {
+       s := new()
+       s.HTTPTestServer = httptest.NewTLSServer(s)
+       return s
+}
+
+type Server struct {
+       //The underlying httptest server
+       HTTPTestServer *httptest.Server
+
+       //Defaults to false.  If set to true, the Server will allow more requests than there are registered handlers.
+       AllowUnhandledRequests bool
+
+       //The status code returned when receiving an unhandled request.
+       //Defaults to http.StatusInternalServerError.
+       //Only applies if AllowUnhandledRequests is true
+       UnhandledRequestStatusCode int
+
+       //If provided, ghttp will log about each request received to the provided io.Writer
+       //Defaults to nil
+       //If you're using Ginkgo, set this to GinkgoWriter to get improved output during failures
+       Writer io.Writer
+
+       receivedRequests []*http.Request
+       requestHandlers  []http.HandlerFunc
+       routedHandlers   []routedHandler
+
+       writeLock *sync.Mutex
+       calls     int
+}
+
+//Start() starts an unstarted ghttp server.  It is a catastrophic error to call Start more than once (thanks, httptest).
+func (s *Server) Start() {
+       s.HTTPTestServer.Start()
+}
+
+//URL() returns a url that will hit the server
+func (s *Server) URL() string {
+       return s.HTTPTestServer.URL
+}
+
+//Addr() returns the address on which the server is listening.
+func (s *Server) Addr() string {
+       return s.HTTPTestServer.Listener.Addr().String()
+}
+
+//Close() should be called at the end of each test.  It spins down and cleans up the test server.
+func (s *Server) Close() {
+       s.writeLock.Lock()
+       defer s.writeLock.Unlock()
+
+       server := s.HTTPTestServer
+       s.HTTPTestServer = nil
+       server.Close()
+}
+
+//ServeHTTP() makes Server an http.Handler
+//When the server receives a request it handles the request in the following order:
+//
+//1. If the request matches a handler registered with RouteToHandler, that handler is called.
+//2. Otherwise, if there are handlers registered via AppendHandlers, those handlers are called in order.
+//3. If all registered handlers have been called then:
+//   a) If AllowUnhandledRequests is true, the request will be handled with response code of UnhandledRequestStatusCode
+//   b) If AllowUnhandledRequests is false, the request will not be handled and the current test will be marked as failed.
+func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+       s.writeLock.Lock()
+       defer func() {
+               e := recover()
+               if e != nil {
+                       w.WriteHeader(http.StatusInternalServerError)
+               }
+
+               //If the handler panics GHTTP will silently succeed.  This is bad™.
+               //To catch this case we need to fail the test if the handler has panicked.
+               //However, if the handler is panicking because Ginkgo's causing it to panic (i.e. an assertion failed)
+               //then we shouldn't double-report the error as this will confuse people.
+
+               //So: step 1, if this is a Ginkgo panic - do nothing, Ginkgo's aware of the failure
+               eAsString, ok := e.(string)
+               if ok && strings.Contains(eAsString, "defer GinkgoRecover()") {
+                       return
+               }
+
+               //If we're here, we have to do step 2: assert that the error is nil.  This assertion will
+               //allow us to fail the test suite (note: we can't call Fail since Gomega is not allowed to import Ginkgo).
+               //Since a failed assertion throws a panic, and we are likely in a goroutine, we need to defer within our defer!
+               defer func() {
+                       recover()
+               }()
+               Ω(e).Should(BeNil(), "Handler Panicked")
+       }()
+
+       if s.Writer != nil {
+               s.Writer.Write([]byte(fmt.Sprintf("GHTTP Received Request: %s - %s\n", req.Method, req.URL)))
+       }
+
+       s.receivedRequests = append(s.receivedRequests, req)
+       if routedHandler, ok := s.handlerForRoute(req.Method, req.URL.Path); ok {
+               s.writeLock.Unlock()
+               routedHandler(w, req)
+       } else if s.calls < len(s.requestHandlers) {
+               h := s.requestHandlers[s.calls]
+               s.calls++
+               s.writeLock.Unlock()
+               h(w, req)
+       } else {
+               s.writeLock.Unlock()
+               if s.AllowUnhandledRequests {
+                       ioutil.ReadAll(req.Body)
+                       req.Body.Close()
+                       w.WriteHeader(s.UnhandledRequestStatusCode)
+               } else {
+                       Ω(req).Should(BeNil(), "Received Unhandled Request")
+               }
+       }
+}
+
+//ReceivedRequests returns all requests received by the server (both handled and unhandled requests).
+func (s *Server) ReceivedRequests() []*http.Request {
+       s.writeLock.Lock()
+       defer s.writeLock.Unlock()
+
+       return s.receivedRequests
+}
+
+//RouteToHandler can be used to register handlers that will always handle requests that match
+//the passed in method and path.
+//
+//The path may be either a string object or a *regexp.Regexp.
+func (s *Server) RouteToHandler(method string, path interface{}, handler http.HandlerFunc) {
+       s.writeLock.Lock()
+       defer s.writeLock.Unlock()
+
+       rh := routedHandler{
+               method:  method,
+               handler: handler,
+       }
+
+       switch p := path.(type) {
+       case *regexp.Regexp:
+               rh.pathRegexp = p
+       case string:
+               rh.path = p
+       default:
+               panic("path must be a string or a regular expression")
+       }
+
+       for i, existingRH := range s.routedHandlers {
+               if existingRH.method == method &&
+                       reflect.DeepEqual(existingRH.pathRegexp, rh.pathRegexp) &&
+                       existingRH.path == rh.path {
+                       s.routedHandlers[i] = rh
+                       return
+               }
+       }
+       s.routedHandlers = append(s.routedHandlers, rh)
+}
+
+func (s *Server) handlerForRoute(method string, path string) (http.HandlerFunc, bool) {
+       for _, rh := range s.routedHandlers {
+               if rh.method == method {
+                       if rh.pathRegexp != nil {
+                               if rh.pathRegexp.Match([]byte(path)) {
+                                       return rh.handler, true
+                               }
+                       } else if rh.path == path {
+                               return rh.handler, true
+                       }
+               }
+       }
+
+       return nil, false
+}
+
+//AppendHandlers appends http.HandlerFuncs to the server's list of registered handlers.  The first incoming request is handled by the first handler, the second by the second, etc...
+func (s *Server) AppendHandlers(handlers ...http.HandlerFunc) {
+       s.writeLock.Lock()
+       defer s.writeLock.Unlock()
+
+       s.requestHandlers = append(s.requestHandlers, handlers...)
+}
+
+//SetHandler overrides the registered handler at the passed in index with the passed in handler
+//This is useful, for example, when a server has been set up in a shared context, but must be tweaked
+//for a particular test.
+func (s *Server) SetHandler(index int, handler http.HandlerFunc) {
+       s.writeLock.Lock()
+       defer s.writeLock.Unlock()
+
+       s.requestHandlers[index] = handler
+}
+
+//GetHandler returns the handler registered at the passed in index.
+func (s *Server) GetHandler(index int) http.HandlerFunc {
+       s.writeLock.Lock()
+       defer s.writeLock.Unlock()
+
+       return s.requestHandlers[index]
+}
+
+//WrapHandler combines the passed in handler with the handler registered at the passed in index.
+//This is useful, for example, when a server has been set up in a shared context but must be tweaked
+//for a particular test.
+//
+//If the currently registered handler is A, and the new passed in handler is B then
+//WrapHandler will generate a new handler that first calls A, then calls B, and assigns it to the passed in index.
+func (s *Server) WrapHandler(index int, handler http.HandlerFunc) {
+       existingHandler := s.GetHandler(index)
+       s.SetHandler(index, CombineHandlers(existingHandler, handler))
+}
+
+func (s *Server) CloseClientConnections() {
+       s.writeLock.Lock()
+       defer s.writeLock.Unlock()
+
+       s.HTTPTestServer.CloseClientConnections()
+}
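
A short sketch (not from the vendored source; the /health and /jobs routes are invented) of the server API above: RouteToHandler answers every matching request, while AppendHandlers and WrapHandler drive the ordered one-shot handlers:

package server_test

import (
	"net/http"
	"regexp"
	"testing"

	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/ghttp"
)

func TestServerWiring(t *testing.T) {
	RegisterTestingT(t)

	server := ghttp.NewServer()
	defer server.Close()

	// Routed handlers answer every matching request, regardless of how many have arrived.
	server.RouteToHandler("GET", "/health", ghttp.RespondWith(http.StatusOK, "ok"))
	server.RouteToHandler("GET", regexp.MustCompile(`^/jobs/\d+$`),
		ghttp.RespondWithJSONEncoded(http.StatusOK, map[string]string{"state": "running"}))

	// The first non-routed request must be a POST to /jobs; WrapHandler tightens it further.
	server.AppendHandlers(ghttp.VerifyRequest("POST", "/jobs"))
	server.WrapHandler(0, ghttp.VerifyContentType("application/json"))

	resp, err := http.Get(server.URL() + "/health")
	Ω(err).ShouldNot(HaveOccurred())
	Ω(resp.StatusCode).Should(Equal(http.StatusOK))
	Ω(server.ReceivedRequests()).Should(HaveLen(1))
}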
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/gomega_dsl.go b/Godeps/_workspace/src/github.com/onsi/gomega/gomega_dsl.go
new file mode 100644 (file)
index 0000000..78bd188
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
+
+The godoc documentation describes Gomega's API.  More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
+
+Gomega on Github: http://github.com/onsi/gomega
+
+Learn more about Ginkgo online: http://onsi.github.io/ginkgo
+
+Ginkgo on Github: http://github.com/onsi/ginkgo
+
+Gomega is MIT-Licensed
+*/
+package gomega
+
+import (
+       "fmt"
+       "reflect"
+       "time"
+
+       "github.com/onsi/gomega/internal/assertion"
+       "github.com/onsi/gomega/internal/asyncassertion"
+       "github.com/onsi/gomega/internal/testingtsupport"
+       "github.com/onsi/gomega/types"
+)
+
+const GOMEGA_VERSION = "1.0"
+
+const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
+If you're using Ginkgo then you probably forgot to put your assertion in an It().
+Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
+`
+
+var globalFailHandler types.GomegaFailHandler
+
+var defaultEventuallyTimeout = time.Second
+var defaultEventuallyPollingInterval = 10 * time.Millisecond
+var defaultConsistentlyDuration = 100 * time.Millisecond
+var defaultConsistentlyPollingInterval = 10 * time.Millisecond
+
+//RegisterFailHandler connects Ginkgo to Gomega.  When a matcher fails
+//the fail handler passed into RegisterFailHandler is called.
+func RegisterFailHandler(handler types.GomegaFailHandler) {
+       globalFailHandler = handler
+}
+
+//RegisterTestingT connects Gomega to Golang's XUnit-style
+//testing.T tests.  You'll need to call this at the top of each XUnit-style test:
+//
+// func TestFarmHasCow(t *testing.T) {
+//     RegisterTestingT(t)
+//
+//        f := farm.New([]string{"Cow", "Horse"})
+//     Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+// }
+//
+// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
+// pass `t` down to the matcher itself).  This means that you cannot run the XUnit style tests
+// in parallel as the global fail handler cannot point to more than one testing.T at a time.
+//
+// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
+func RegisterTestingT(t types.GomegaTestingT) {
+       RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t))
+}
+
+//InterceptGomegaFailures runs a given callback and returns an array of
+//failure messages generated by any Gomega assertions within the callback.
+//
+//This is accomplished by temporarily replacing the *global* fail handler
+//with a fail handler that simply annotates failures.  The original fail handler
+//is reset when InterceptGomegaFailures returns.
+//
+//This is most useful when testing custom matchers, but can also be used to check
+//on a value using a Gomega assertion without causing a test failure.
+func InterceptGomegaFailures(f func()) []string {
+       originalHandler := globalFailHandler
+       failures := []string{}
+       RegisterFailHandler(func(message string, callerSkip ...int) {
+               failures = append(failures, message)
+       })
+       f()
+       RegisterFailHandler(originalHandler)
+       return failures
+}
+
+//Ω wraps an actual value allowing assertions to be made on it:
+//     Ω("foo").Should(Equal("foo"))
+//
+//If Ω is passed more than one argument it will pass the *first* argument to the matcher.
+//All subsequent arguments will be required to be nil/zero.
+//
+//This is convenient if you want to make an assertion on a method/function that returns
+//a value and an error - a common pattern in Go.
+//
+//For example, given a function with signature:
+//  func MyAmazingThing() (int, error)
+//
+//Then:
+//    Ω(MyAmazingThing()).Should(Equal(3))
+//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+//Ω and Expect are identical
+func Ω(actual interface{}, extra ...interface{}) GomegaAssertion {
+       return ExpectWithOffset(0, actual, extra...)
+}
+
+//Expect wraps an actual value allowing assertions to be made on it:
+//     Expect("foo").To(Equal("foo"))
+//
+//If Expect is passed more than one argument it will pass the *first* argument to the matcher.
+//All subsequent arguments will be required to be nil/zero.
+//
+//This is convenient if you want to make an assertion on a method/function that returns
+//a value and an error - a common pattern in Go.
+//
+//For example, given a function with signature:
+//  func MyAmazingThing() (int, error)
+//
+//Then:
+//    Expect(MyAmazingThing()).Should(Equal(3))
+//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
+//
+//Expect and Ω are identical
+func Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
+       return ExpectWithOffset(0, actual, extra...)
+}
+
+//ExpectWithOffset wraps an actual value allowing assertions to be made on it:
+//    ExpectWithOffset(1, "foo").To(Equal("foo"))
+//
+//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument;
+//this is used to modify the call-stack offset when computing line numbers.
+//
+//This is most useful in helper functions that make assertions.  If you want Gomega's
+//error message to refer to the calling line in the test (as opposed to the line in the helper function)
+//set the first argument of `ExpectWithOffset` appropriately.
+func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion {
+       if globalFailHandler == nil {
+               panic(nilFailHandlerPanic)
+       }
+       return assertion.New(actual, globalFailHandler, offset, extra...)
+}
+
+//Eventually wraps an actual value allowing assertions to be made on it.
+//The assertion is tried periodically until it passes or a timeout occurs.
+//
+//Both the timeout and polling interval are configurable as optional arguments:
+//The first optional argument is the timeout
+//The second optional argument is the polling interval
+//
+//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers.  In the
+//last case they are interpreted as seconds.
+//
+//If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
+//then Eventually will call the function periodically and try the matcher against the function's first return value.
+//
+//Example:
+//
+//    Eventually(func() int {
+//        return thingImPolling.Count()
+//    }).Should(BeNumerically(">=", 17))
+//
+//Note that this example could be rewritten:
+//
+//    Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
+//
+//If the function returns more than one value, then Eventually will pass the first value to the matcher and
+//assert that all other values are nil/zero.
+//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
+//
+//For example, consider a method that returns a value and an error:
+//    func FetchFromDB() (string, error)
+//
+//Then
+//    Eventually(FetchFromDB).Should(Equal("hasselhoff"))
+//
+//Will pass only if the returned error is nil and the returned string passes the matcher.
+//
+//Eventually's default timeout is 1 second, and its default polling interval is 10ms
+func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+       return EventuallyWithOffset(0, actual, intervals...)
+}
+
+//EventuallyWithOffset operates like Eventually but takes an additional
+//initial argument to indicate an offset in the call stack.  This is useful when building helper
+//functions that contain matchers.  To learn more, read about `ExpectWithOffset`.
+func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+       if globalFailHandler == nil {
+               panic(nilFailHandlerPanic)
+       }
+       timeoutInterval := defaultEventuallyTimeout
+       pollingInterval := defaultEventuallyPollingInterval
+       if len(intervals) > 0 {
+               timeoutInterval = toDuration(intervals[0])
+       }
+       if len(intervals) > 1 {
+               pollingInterval = toDuration(intervals[1])
+       }
+       return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
+}
+
+//Consistently wraps an actual value allowing assertions to be made on it.
+//The assertion is tried periodically and is required to pass for a period of time.
+//
+//Both the total time and polling interval are configurable as optional arguments:
+//The first optional argument is the duration that Consistently will run for
+//The second optional argument is the polling interval
+//
+//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers.  In the
+//last case they are interpreted as seconds.
+//
+//If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
+//then Consistently will call the function periodically and try the matcher against the function's first return value.
+//
+//If the function returns more than one value, then Consistently will pass the first value to the matcher and
+//assert that all other values are nil/zero.
+//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
+//
+//Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
+//For example, you want to assert that a goroutine does *not* send data down a channel.  In this case, you could:
+//
+//  Consistently(channel).ShouldNot(Receive())
+//
+//Consistently's default duration is 100ms, and its default polling interval is 10ms
+func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+       return ConsistentlyWithOffset(0, actual, intervals...)
+}
+
+//ConsistentlyWithOffset operates like Consistently but takes an additional
+//initial argument to indicate an offset in the call stack.  This is useful when building helper
+//functions that contain matchers.  To learn more, read about `ExpectWithOffset`.
+func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+       if globalFailHandler == nil {
+               panic(nilFailHandlerPanic)
+       }
+       timeoutInterval := defaultConsistentlyDuration
+       pollingInterval := defaultConsistentlyPollingInterval
+       if len(intervals) > 0 {
+               timeoutInterval = toDuration(intervals[0])
+       }
+       if len(intervals) > 1 {
+               pollingInterval = toDuration(intervals[1])
+       }
+       return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
+}
+
+//Set the default timeout duration for Eventually.  Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
+func SetDefaultEventuallyTimeout(t time.Duration) {
+       defaultEventuallyTimeout = t
+}
+
+//Set the default polling interval for Eventually.
+func SetDefaultEventuallyPollingInterval(t time.Duration) {
+       defaultEventuallyPollingInterval = t
+}
+
+//Set the default duration for Consistently.  Consistently will verify that your condition is satisfied for this long.
+func SetDefaultConsistentlyDuration(t time.Duration) {
+       defaultConsistentlyDuration = t
+}
+
+//Set the default polling interval for Consistently.
+func SetDefaultConsistentlyPollingInterval(t time.Duration) {
+       defaultConsistentlyPollingInterval = t
+}
+
+//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
+//the matcher passed to the Should and ShouldNot methods.
+//
+//Both Should and ShouldNot take a variadic optionalDescription argument.  This is passed on to
+//fmt.Sprintf() and is used to annotate failure messages.  This allows you to make your failure messages more
+//descriptive
+//
+//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
+//
+//Example:
+//
+//  Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+//  Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
+type GomegaAsyncAssertion interface {
+       Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+       ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+}
+
+//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher
+//passed to the Should/ShouldNot and To/ToNot/NotTo methods.
+//
+//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
+//though this is not enforced.
+//
+//All methods take a variadic optionalDescription argument.  This is passed on to fmt.Sprintf()
+//and is used to annotate failure messages.
+//
+//All methods return a bool that is true if the assertion passed and false if it failed.
+//
+//Example:
+//
+//   Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+type GomegaAssertion interface {
+       Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+       ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+
+       To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+       ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+       NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
+}
+
+//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
+type OmegaMatcher types.GomegaMatcher
+
+func toDuration(input interface{}) time.Duration {
+       duration, ok := input.(time.Duration)
+       if ok {
+               return duration
+       }
+
+       value := reflect.ValueOf(input)
+       kind := reflect.TypeOf(input).Kind()
+
+       if reflect.Int <= kind && kind <= reflect.Int64 {
+               return time.Duration(value.Int()) * time.Second
+       } else if reflect.Uint <= kind && kind <= reflect.Uint64 {
+               return time.Duration(value.Uint()) * time.Second
+       } else if reflect.Float32 <= kind && kind <= reflect.Float64 {
+               return time.Duration(value.Float() * float64(time.Second))
+       } else if reflect.String == kind {
+               duration, err := time.ParseDuration(value.String())
+               if err != nil {
+                       panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
+               }
+               return duration
+       }
+
+       panic(fmt.Sprintf("%v is not a valid interval.  Must be time.Duration, parsable duration string or a number.", input))
+}
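
A minimal sketch (not part of the vendored source; fetchCount and queue are invented) of the interval handling and multi-return behaviour described above:

package dsl_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestEventuallyAndConsistently(t *testing.T) {
	RegisterTestingT(t)

	// A polled function may return (value, error); the trailing error must stay nil.
	fetchCount := func() (int, error) { return 17, nil }

	// Intervals may be time.Durations, parsable duration strings, or plain numbers of seconds:
	// here Eventually times out after 2s and polls every 50ms.
	Eventually(fetchCount, "2s", 50*time.Millisecond).Should(BeNumerically(">=", 17))

	// Consistently holds the assertion for 500ms and fails if anything arrives on the channel.
	queue := make(chan int)
	Consistently(queue, 500*time.Millisecond).ShouldNot(Receive())
}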
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/internal/assertion/assertion.go b/Godeps/_workspace/src/github.com/onsi/gomega/internal/assertion/assertion.go
new file mode 100644 (file)
index 0000000..b73673f
--- /dev/null
@@ -0,0 +1,98 @@
+package assertion
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/gomega/types"
+)
+
+type Assertion struct {
+       actualInput interface{}
+       fail        types.GomegaFailHandler
+       offset      int
+       extra       []interface{}
+}
+
+func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion {
+       return &Assertion{
+               actualInput: actualInput,
+               fail:        fail,
+               offset:      offset,
+               extra:       extra,
+       }
+}
+
+func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+       return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+       return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+       return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+       return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+       return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
+       switch len(optionalDescription) {
+       case 0:
+               return ""
+       default:
+               return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
+       }
+}
+
+func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+       matches, err := matcher.Match(assertion.actualInput)
+       description := assertion.buildDescription(optionalDescription...)
+       if err != nil {
+               assertion.fail(description+err.Error(), 2+assertion.offset)
+               return false
+       }
+       if matches != desiredMatch {
+               var message string
+               if desiredMatch {
+                       message = matcher.FailureMessage(assertion.actualInput)
+               } else {
+                       message = matcher.NegatedFailureMessage(assertion.actualInput)
+               }
+               assertion.fail(description+message, 2+assertion.offset)
+               return false
+       }
+
+       return true
+}
+
+func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
+       success, message := vetExtras(assertion.extra)
+       if success {
+               return true
+       }
+
+       description := assertion.buildDescription(optionalDescription...)
+       assertion.fail(description+message, 2+assertion.offset)
+       return false
+}
+
+func vetExtras(extras []interface{}) (bool, string) {
+       for i, extra := range extras {
+               if extra != nil {
+                       zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
+                       if !reflect.DeepEqual(zeroValue, extra) {
+                               message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
+                               return false, message
+                       }
+               }
+       }
+       return true, ""
+}
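
A small sketch (not from the vendored source; ExpectValidWidget is a hypothetical helper) of how the offset carried by Assertion is used in practice: passing offset 1 makes a failure point at the caller's line rather than at a line inside the helper.

package helpers_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// ExpectValidWidget is a hypothetical assertion helper.
func ExpectValidWidget(widget map[string]interface{}) {
	ExpectWithOffset(1, widget).Should(HaveKey("name"))
	ExpectWithOffset(1, widget).Should(HaveKey("id"))
}

func TestWidgetHelper(t *testing.T) {
	RegisterTestingT(t)
	ExpectValidWidget(map[string]interface{}{"name": "sprocket", "id": 42})
}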
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/Godeps/_workspace/src/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
new file mode 100644 (file)
index 0000000..bce0853
--- /dev/null
@@ -0,0 +1,189 @@
+package asyncassertion
+
+import (
+       "errors"
+       "fmt"
+       "reflect"
+       "time"
+
+       "github.com/onsi/gomega/internal/oraclematcher"
+       "github.com/onsi/gomega/types"
+)
+
+type AsyncAssertionType uint
+
+const (
+       AsyncAssertionTypeEventually AsyncAssertionType = iota
+       AsyncAssertionTypeConsistently
+)
+
+type AsyncAssertion struct {
+       asyncType       AsyncAssertionType
+       actualInput     interface{}
+       timeoutInterval time.Duration
+       pollingInterval time.Duration
+       fail            types.GomegaFailHandler
+       offset          int
+}
+
+func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion {
+       actualType := reflect.TypeOf(actualInput)
+       if actualType.Kind() == reflect.Func {
+               if actualType.NumIn() != 0 || actualType.NumOut() == 0 {
+                       panic("Expected a function with no arguments and one or more return values.")
+               }
+       }
+
+       return &AsyncAssertion{
+               asyncType:       asyncType,
+               actualInput:     actualInput,
+               fail:            fail,
+               timeoutInterval: timeoutInterval,
+               pollingInterval: pollingInterval,
+               offset:          offset,
+       }
+}
+
+func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+       return assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+       return assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
+       switch len(optionalDescription) {
+       case 0:
+               return ""
+       default:
+               return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
+       }
+}
+
+func (assertion *AsyncAssertion) actualInputIsAFunction() bool {
+       actualType := reflect.TypeOf(assertion.actualInput)
+       return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0
+}
+
+func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
+       if assertion.actualInputIsAFunction() {
+               values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{})
+
+               extras := []interface{}{}
+               for _, value := range values[1:] {
+                       extras = append(extras, value.Interface())
+               }
+
+               success, message := vetExtras(extras)
+
+               if !success {
+                       return nil, errors.New(message)
+               }
+
+               return values[0].Interface(), nil
+       }
+
+       return assertion.actualInput, nil
+}
+
+func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool {
+       if assertion.actualInputIsAFunction() {
+               return true
+       }
+
+       return oraclematcher.MatchMayChangeInTheFuture(matcher, value)
+}
+
+func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+       timer := time.Now()
+       timeout := time.After(assertion.timeoutInterval)
+
+       description := assertion.buildDescription(optionalDescription...)
+
+       var matches bool
+       var err error
+       mayChange := true
+       value, err := assertion.pollActual()
+       if err == nil {
+               mayChange = assertion.matcherMayChange(matcher, value)
+               matches, err = matcher.Match(value)
+       }
+
+       fail := func(preamble string) {
+               errMsg := ""
+               message := ""
+               if err != nil {
+                       errMsg = "Error: " + err.Error()
+               } else {
+                       if desiredMatch {
+                               message = matcher.FailureMessage(value)
+                       } else {
+                               message = matcher.NegatedFailureMessage(value)
+                       }
+               }
+               assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
+       }
+
+       if assertion.asyncType == AsyncAssertionTypeEventually {
+               for {
+                       if err == nil && matches == desiredMatch {
+                               return true
+                       }
+
+                       if !mayChange {
+                               fail("No future change is possible.  Bailing out early")
+                               return false
+                       }
+
+                       select {
+                       case <-time.After(assertion.pollingInterval):
+                               value, err = assertion.pollActual()
+                               if err == nil {
+                                       mayChange = assertion.matcherMayChange(matcher, value)
+                                       matches, err = matcher.Match(value)
+                               }
+                       case <-timeout:
+                               fail("Timed out")
+                               return false
+                       }
+               }
+       } else if assertion.asyncType == AsyncAssertionTypeConsistently {
+               for {
+                       if !(err == nil && matches == desiredMatch) {
+                               fail("Failed")
+                               return false
+                       }
+
+                       if !mayChange {
+                               return true
+                       }
+
+                       select {
+                       case <-time.After(assertion.pollingInterval):
+                               value, err = assertion.pollActual()
+                               if err == nil {
+                                       mayChange = assertion.matcherMayChange(matcher, value)
+                                       matches, err = matcher.Match(value)
+                               }
+                       case <-timeout:
+                               return true
+                       }
+               }
+       }
+
+       return false
+}
+
+func vetExtras(extras []interface{}) (bool, string) {
+       for i, extra := range extras {
+               if extra != nil {
+                       zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
+                       if !reflect.DeepEqual(zeroValue, extra) {
+                               message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
+                               return false, message
+                       }
+               }
+       }
+       return true, ""
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go
new file mode 100644 (file)
index 0000000..6e351a7
--- /dev/null
@@ -0,0 +1,23 @@
+package fakematcher
+
+import "fmt"
+
+type FakeMatcher struct {
+       ReceivedActual  interface{}
+       MatchesToReturn bool
+       ErrToReturn     error
+}
+
+func (matcher *FakeMatcher) Match(actual interface{}) (bool, error) {
+       matcher.ReceivedActual = actual
+
+       return matcher.MatchesToReturn, matcher.ErrToReturn
+}
+
+func (matcher *FakeMatcher) FailureMessage(actual interface{}) string {
+       return fmt.Sprintf("positive: %v", actual)
+}
+
+func (matcher *FakeMatcher) NegatedFailureMessage(actual interface{}) string {
+       return fmt.Sprintf("negative: %v", actual)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
new file mode 100644 (file)
index 0000000..66cad88
--- /dev/null
@@ -0,0 +1,25 @@
+package oraclematcher
+
+import "github.com/onsi/gomega/types"
+
+/*
+GomegaMatchers that also match the OracleMatcher interface can convey information about
+whether or not their result will change upon future attempts.
+
+This allows `Eventually` and `Consistently` to short circuit if success becomes impossible.
+
+For example, a process' exit code can never change.  So, gexec's Exit matcher returns `true`
+for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
+*/
+type OracleMatcher interface {
+       MatchMayChangeInTheFuture(actual interface{}) bool
+}
+
+func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool {
+       oracleMatcher, ok := matcher.(OracleMatcher)
+       if !ok {
+               return true
+       }
+
+       return oracleMatcher.MatchMayChangeInTheFuture(value)
+}
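
A hypothetical sketch (the Job type and BeDone matcher are invented, not part of the vendored source) of a matcher that also implements OracleMatcher, so Eventually can stop polling once success has become impossible:

package jobs_test

import (
	"fmt"

	"github.com/onsi/gomega/types"
)

type Job struct {
	Done   bool
	Failed bool
}

// BeDone succeeds when a *Job has completed.  Usage: Eventually(getJob).Should(BeDone())
func BeDone() types.GomegaMatcher { return &beDoneMatcher{} }

type beDoneMatcher struct{}

func (m *beDoneMatcher) Match(actual interface{}) (bool, error) {
	job, ok := actual.(*Job)
	if !ok {
		return false, fmt.Errorf("BeDone expects a *Job, got %T", actual)
	}
	return job.Done, nil
}

func (m *beDoneMatcher) FailureMessage(actual interface{}) string {
	return fmt.Sprintf("Expected job to be done:\n\t%#v", actual)
}

func (m *beDoneMatcher) NegatedFailureMessage(actual interface{}) string {
	return fmt.Sprintf("Expected job not to be done:\n\t%#v", actual)
}

// A failed job can never become done, so Eventually may bail out early.
func (m *beDoneMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
	job, ok := actual.(*Job)
	if !ok {
		return false
	}
	return !job.Failed
}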
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/Godeps/_workspace/src/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
new file mode 100644 (file)
index 0000000..7871fd4
--- /dev/null
@@ -0,0 +1,40 @@
+package testingtsupport
+
+import (
+       "regexp"
+       "runtime/debug"
+       "strings"
+
+       "github.com/onsi/gomega/types"
+)
+
+type gomegaTestingT interface {
+       Errorf(format string, args ...interface{})
+}
+
+func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler {
+       return func(message string, callerSkip ...int) {
+               skip := 1
+               if len(callerSkip) > 0 {
+                       skip = callerSkip[0]
+               }
+               stackTrace := pruneStack(string(debug.Stack()), skip)
+               t.Errorf("\n%s\n%s", stackTrace, message)
+       }
+}
+
+func pruneStack(fullStackTrace string, skip int) string {
+       stack := strings.Split(fullStackTrace, "\n")
+       if len(stack) > 2*(skip+1) {
+               stack = stack[2*(skip+1):]
+       }
+       prunedStack := []string{}
+       re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
+       for i := 0; i < len(stack)/2; i++ {
+               if !re.Match([]byte(stack[i*2])) {
+                       prunedStack = append(prunedStack, stack[i*2])
+                       prunedStack = append(prunedStack, stack[i*2+1])
+               }
+       }
+       return strings.Join(prunedStack, "\n")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers.go
new file mode 100644 (file)
index 0000000..b6110c4
--- /dev/null
@@ -0,0 +1,393 @@
+package gomega
+
+import (
+       "time"
+
+       "github.com/onsi/gomega/matchers"
+       "github.com/onsi/gomega/types"
+)
+
+//Equal uses reflect.DeepEqual to compare actual with expected.  Equal is strict about
+//types when performing comparisons.
+//It is an error for both actual and expected to be nil.  Use BeNil() instead.
+func Equal(expected interface{}) types.GomegaMatcher {
+       return &matchers.EqualMatcher{
+               Expected: expected,
+       }
+}
+
+//BeEquivalentTo is more lax than Equal, allowing equality between different types.
+//This is done by converting actual to have the type of expected before
+//attempting equality with reflect.DeepEqual.
+//It is an error for both actual and expected to be nil.  Use BeNil() instead.
+func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
+       return &matchers.BeEquivalentToMatcher{
+               Expected: expected,
+       }
+}
+
+//BeNil succeeds if actual is nil
+func BeNil() types.GomegaMatcher {
+       return &matchers.BeNilMatcher{}
+}
+
+//BeTrue succeeds if actual is true
+func BeTrue() types.GomegaMatcher {
+       return &matchers.BeTrueMatcher{}
+}
+
+//BeFalse succeeds if actual is false
+func BeFalse() types.GomegaMatcher {
+       return &matchers.BeFalseMatcher{}
+}
+
+//HaveOccurred succeeds if actual is a non-nil error
+//The typical Go error checking pattern looks like:
+//    err := SomethingThatMightFail()
+//    Ω(err).ShouldNot(HaveOccurred())
+func HaveOccurred() types.GomegaMatcher {
+       return &matchers.HaveOccurredMatcher{}
+}
+
+//Succeed passes if actual is a nil error
+//Succeed is intended to be used with functions that return a single error value. Instead of
+//    err := SomethingThatMightFail()
+//    Ω(err).ShouldNot(HaveOccurred())
+//
+//You can write:
+//    Ω(SomethingThatMightFail()).Should(Succeed())
+//
+//It is a mistake to use Succeed with a function that has multiple return values.  Gomega's Ω and Expect
+//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
+//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
+func Succeed() types.GomegaMatcher {
+       return &matchers.SucceedMatcher{}
+}
+
+//MatchError succeeds if actual is a non-nil error that matches the passed in string/error.
+//
+//These are valid use-cases:
+//  Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
+//  Ω(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
+//
+//It is an error for err to be nil or an object that does not implement the error interface.
+func MatchError(expected interface{}) types.GomegaMatcher {
+       return &matchers.MatchErrorMatcher{
+               Expected: expected,
+       }
+}
+
+//BeClosed succeeds if actual is a closed channel.
+//It is an error to pass a non-channel to BeClosed; it is also an error to pass nil.
+//
+//In order to check whether or not the channel is closed, Gomega must try to read from the channel
+//(even in the `ShouldNot(BeClosed())` case).  You should keep this in mind if you wish to make subsequent assertions about
+//values coming down the channel.
+//
+//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
+//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
+//
+//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
+func BeClosed() types.GomegaMatcher {
+       return &matchers.BeClosedMatcher{}
+}
+
+//Receive succeeds if there is a value to be received on actual.
+//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
+//
+//Receive returns immediately and never blocks:
+//
+//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+//- If the channel `c` is closed then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+//
+//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
+//
+//If you have a go-routine running in the background that will write to channel `c` you can:
+//    Eventually(c).Should(Receive())
+//
+//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
+//
+//A similar use-case is to assert that no go-routine writes to a channel (for a period of time).  You can do this with `Consistently`:
+//    Consistently(c).ShouldNot(Receive())
+//
+//You can pass `Receive` a matcher.  If you do so, it will match the received object against the matcher.  For example:
+//    Ω(c).Should(Receive(Equal("foo")))
+//
+//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+//
+//Passing Receive a matcher is especially useful when paired with Eventually:
+//
+//    Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+//    var myThing thing
+//    Eventually(thingChan).Should(Receive(&myThing))
+//    Ω(myThing.Sprocket).Should(Equal("foo"))
+//    Ω(myThing.IsValid()).Should(BeTrue())
+func Receive(args ...interface{}) types.GomegaMatcher {
+       var arg interface{}
+       if len(args) > 0 {
+               arg = args[0]
+       }
+
+       return &matchers.ReceiveMatcher{
+               Arg: arg,
+       }
+}
+
+//BeSent succeeds if a value can be sent to actual.
+//Actual must be a channel (and cannot be a receive-only channel) that can accept values of the type passed into BeSent -- anything else is an error.
+//In addition, actual must not be closed.
+//
+//BeSent never blocks:
+//
+//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately
+//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
+//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+//
+//Of course, the value is actually sent to the channel.  The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
+func BeSent(arg interface{}) types.GomegaMatcher {
+       return &matchers.BeSentMatcher{
+               Arg: arg,
+       }
+}
+
+//MatchRegexp succeeds if actual is a string or stringer that matches the
+//passed-in regexp.  Optional arguments can be provided to construct a regexp
+//via fmt.Sprintf().
+func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+       return &matchers.MatchRegexpMatcher{
+               Regexp: regexp,
+               Args:   args,
+       }
+}
+
+//ContainSubstring succeeds if actual is a string or stringer that contains the
+//passed-in substring.  Optional arguments can be provided to construct the substring
+//via fmt.Sprintf().
+func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+       return &matchers.ContainSubstringMatcher{
+               Substr: substr,
+               Args:   args,
+       }
+}
+
+//HavePrefix succeeds if actual is a string or stringer that contains the
+//passed-in string as a prefix.  Optional arguments can be provided to construct
+//the prefix via fmt.Sprintf().
+func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+       return &matchers.HavePrefixMatcher{
+               Prefix: prefix,
+               Args:   args,
+       }
+}
+
+//HaveSuffix succeeds if actual is a string or stringer that contains the
+//passed-in string as a suffix.  Optional arguments can be provided to construct
+//the suffix via fmt.Sprintf().
+func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+       return &matchers.HaveSuffixMatcher{
+               Suffix: suffix,
+               Args:   args,
+       }
+}
+
+//MatchJSON succeeds if actual is a string or stringer of JSON that matches
+//the expected JSON.  The JSONs are decoded and the resulting objects are compared via
+//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+func MatchJSON(json interface{}) types.GomegaMatcher {
+       return &matchers.MatchJSONMatcher{
+               JSONToMatch: json,
+       }
+}
+
+//BeEmpty succeeds if actual is empty.  Actual must be of type string, array, map, chan, or slice.
+func BeEmpty() types.GomegaMatcher {
+       return &matchers.BeEmptyMatcher{}
+}
+
+//HaveLen succeeds if actual has the passed-in length.  Actual must be of type string, array, map, chan, or slice.
+func HaveLen(count int) types.GomegaMatcher {
+       return &matchers.HaveLenMatcher{
+               Count: count,
+       }
+}
+
+//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+func BeZero() types.GomegaMatcher {
+       return &matchers.BeZeroMatcher{}
+}
+
+//ContainElement succeeds if actual contains the passed in element.
+//By default ContainElement() uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//    Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+//
+//Actual must be an array, slice or map.
+//For maps, ContainElement searches through the map's values.
+func ContainElement(element interface{}) types.GomegaMatcher {
+       return &matchers.ContainElementMatcher{
+               Element: element,
+       }
+}
+
+//ConsistOf succeeds if actual contains precisely the elements passed into the matcher.  The ordering of the elements does not matter.
+//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead.  Here are some examples:
+//
+//    Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+//    Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+//    Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+//Actual must be an array, slice or map.  For maps, ConsistOf matches against the map's values.
+//
+//You typically pass variadic arguments to ConsistOf (as in the examples above).  However, if you need to pass in a slice you can, provided that it
+//is the only element passed in to ConsistOf:
+//
+//    Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+//
+//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+       return &matchers.ConsistOfMatcher{
+               Elements: elements,
+       }
+}
+
+//HaveKey succeeds if actual is a map with the passed in key.
+//By default HaveKey uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//    Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+func HaveKey(key interface{}) types.GomegaMatcher {
+       return &matchers.HaveKeyMatcher{
+               Key: key,
+       }
+}
+
+//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+//By default HaveKeyWithValue uses Equal() to perform the match, however a
+//matcher can be passed in instead:
+//    Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+//    Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
+       return &matchers.HaveKeyWithValueMatcher{
+               Key:   key,
+               Value: value,
+       }
+}
+
+//BeNumerically performs numerical assertions in a type-agnostic way.
+//Actual and expected should be numbers, though the specific type of
+//number is irrelevant (float32, float64, uint8, etc...).
+//
+//There are six, self-explanatory, supported comparators:
+//    Ω(1.0).Should(BeNumerically("==", 1))
+//    Ω(1.0).Should(BeNumerically("~", 0.999, 0.01))
+//    Ω(1.0).Should(BeNumerically(">", 0.9))
+//    Ω(1.0).Should(BeNumerically(">=", 1.0))
+//    Ω(1.0).Should(BeNumerically("<", 3))
+//    Ω(1.0).Should(BeNumerically("<=", 1.0))
+func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
+       return &matchers.BeNumericallyMatcher{
+               Comparator: comparator,
+               CompareTo:  compareTo,
+       }
+}
+
+//BeTemporally compares time.Time's like BeNumerically
+//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
+//    Ω(time.Now()).Should(BeTemporally(">", time.Time{}))
+//    Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
+func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
+       return &matchers.BeTemporallyMatcher{
+               Comparator: comparator,
+               CompareTo:  compareTo,
+               Threshold:  threshold,
+       }
+}
+
+//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
+//It will return an error when one of the values is nil.
+//    Ω(0).Should(BeAssignableToTypeOf(0))            // same values
+//    Ω(5).Should(BeAssignableToTypeOf(-1))           // different values, same type
+//    Ω("foo").Should(BeAssignableToTypeOf("bar"))    // different values, same type
+//    Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
+func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
+       return &matchers.AssignableToTypeOfMatcher{
+               Expected: expected,
+       }
+}
+
+//Panic succeeds if actual is a function that, when invoked, panics.
+//Actual must be a function that takes no arguments and returns no results.
+func Panic() types.GomegaMatcher {
+       return &matchers.PanicMatcher{}
+}
+
+//BeAnExistingFile succeeds if a file exists.
+//Actual must be a string representing the abs path to the file being checked.
+func BeAnExistingFile() types.GomegaMatcher {
+       return &matchers.BeAnExistingFileMatcher{}
+}
+
+//BeARegularFile succeeds iff a file exists and is a regular file.
+//Actual must be a string representing the abs path to the file being checked.
+func BeARegularFile() types.GomegaMatcher {
+       return &matchers.BeARegularFileMatcher{}
+}
+
+//BeADirectory succeeds iff a file exists and is a directory.
+//Actual must be a string representing the abs path to the file being checked.
+func BeADirectory() types.GomegaMatcher {
+       return &matchers.BeADirectoryMatcher{}
+}
+
+//And succeeds only if all of the given matchers succeed.
+//The matchers are tried in order, and will fail-fast if one doesn't succeed.
+//  Expect("hi").To(And(HaveLen(2), Equal("hi"))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
+       return &matchers.AndMatcher{Matchers: ms}
+}
+
+//SatisfyAll is an alias for And().
+//  Ω("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+       return And(matchers...)
+}
+
+//Or succeeds if any of the given matchers succeed.
+//The matchers are tried in order and will return immediately upon the first successful match.
+//  Expect("hi").To(Or(HaveLen(3), HaveLen(2))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
+       return &matchers.OrMatcher{Matchers: ms}
+}
+
+//SatisfyAny is an alias for Or().
+//  Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
+func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
+       return Or(matchers...)
+}
+
+//Not negates the given matcher; it succeeds if the given matcher fails.
+//  Expect(1).To(Not(Equal(2)))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
+       return &matchers.NotMatcher{Matcher: matcher}
+}
+
+//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
+//The given transform must be a function of one parameter that returns one value.
+//  var plus1 = func(i int) int { return i + 1 }
+//  Expect(1).To(WithTransform(plus1, Equal(2)))
+//
+//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
+       return matchers.NewWithTransformMatcher(transform, matcher)
+}
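The functions above are the matcher DSL exposed by the gomega package. A short usage sketch combining a few of them; the test name, resultChan, and the values asserted against are invented for illustration:

    package example_test

    import (
        "testing"
        "time"

        . "github.com/onsi/gomega"
    )

    func TestMatcherDSL(t *testing.T) {
        RegisterTestingT(t)

        Ω([]string{"Foo", "FooBar"}).Should(ContainElement(HavePrefix("Foo")))
        Ω(map[string]string{"Foo": "Bar"}).Should(HaveKeyWithValue("Foo", "Bar"))
        Ω(3.14).Should(BeNumerically("~", 3.1, 0.05))

        resultChan := make(chan string, 1)
        go func() { resultChan <- "done" }()
        // Eventually polls the Receive matcher until a matching value arrives or it times out.
        Eventually(resultChan, time.Second).Should(Receive(Equal("done")))
    }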
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/and.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/and.go
new file mode 100644 (file)
index 0000000..94c42a7
--- /dev/null
@@ -0,0 +1,64 @@
+package matchers
+
+import (
+       "fmt"
+
+       "github.com/onsi/gomega/format"
+       "github.com/onsi/gomega/internal/oraclematcher"
+       "github.com/onsi/gomega/types"
+)
+
+type AndMatcher struct {
+       Matchers []types.GomegaMatcher
+
+       // state
+       firstFailedMatcher types.GomegaMatcher
+}
+
+func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
+       m.firstFailedMatcher = nil
+       for _, matcher := range m.Matchers {
+               success, err := matcher.Match(actual)
+               if !success || err != nil {
+                       m.firstFailedMatcher = matcher
+                       return false, err
+               }
+       }
+       return true, nil
+}
+
+func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
+       return m.firstFailedMatcher.FailureMessage(actual)
+}
+
+func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       // not the most beautiful list of matchers, but not bad either...
+       return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
+}
+
+func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+       /*
+               Example with 3 matchers: A, B, C
+
+               Match evaluates them: T, F, <?>  => F
+               So match is currently F, what should MatchMayChangeInTheFuture() return?
+               Seems like it only depends on B, since currently B MUST change to allow the result to become T
+
+               Match eval: T, T, T  => T
+               So match is currently T, what should MatchMayChangeInTheFuture() return?
+               Seems to depend on ANY of them being able to change to F.
+       */
+
+       if m.firstFailedMatcher == nil {
+               // so all matchers succeeded. Any one of them changing would change the result.
+               for _, matcher := range m.Matchers {
+                       if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+                               return true
+                       }
+               }
+               return false // none of them was going to change
+       } else {
+               // one of the matchers failed. It must be able to change in order to affect the result
+               return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
+       }
+}
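A short sketch of the behaviour the comment block above describes: Eventually keeps polling an And assertion while the failed sub-matcher can still change. The poll counter below is invented for illustration:

    package example_test

    import (
        "testing"
        "time"

        . "github.com/onsi/gomega"
    )

    func TestAndWithEventually(t *testing.T) {
        RegisterTestingT(t)

        counter := 0
        poll := func() int { counter++; return counter }

        // The first sub-matcher fails until the counter exceeds 3; And reports that
        // its result may still change, so Eventually keeps polling until it passes.
        Eventually(poll, time.Second, 10*time.Millisecond).Should(And(
            BeNumerically(">", 3),
            BeNumerically("<", 1000),
        ))
    }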
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
new file mode 100644 (file)
index 0000000..89a1fc2
--- /dev/null
@@ -0,0 +1,31 @@
+package matchers
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/gomega/format"
+)
+
+type AssignableToTypeOfMatcher struct {
+       Expected interface{}
+}
+
+func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
+       if actual == nil || matcher.Expected == nil {
+               return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead.  This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+       }
+
+       actualType := reflect.TypeOf(actual)
+       expectedType := reflect.TypeOf(matcher.Expected)
+
+       return actualType.AssignableTo(expectedType), nil
+}
+
+func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
+       return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
+}
+
+func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
+       return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_a_directory.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_a_directory.go
new file mode 100644 (file)
index 0000000..7b6975e
--- /dev/null
@@ -0,0 +1,54 @@
+package matchers
+
+import (
+       "fmt"
+       "os"
+
+       "github.com/onsi/gomega/format"
+)
+
+type notADirectoryError struct {
+       os.FileInfo
+}
+
+func (t notADirectoryError) Error() string {
+       fileInfo := os.FileInfo(t)
+       switch {
+       case fileInfo.Mode().IsRegular():
+               return "file is a regular file"
+       default:
+               return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
+       }
+}
+
+type BeADirectoryMatcher struct {
+       expected interface{}
+       err      error
+}
+
+func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
+       actualFilename, ok := actual.(string)
+       if !ok {
+               return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path")
+       }
+
+       fileInfo, err := os.Stat(actualFilename)
+       if err != nil {
+               matcher.err = err
+               return false, nil
+       }
+
+       if !fileInfo.Mode().IsDir() {
+               matcher.err = notADirectoryError{fileInfo}
+               return false, nil
+       }
+       return true, nil
+}
+
+func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
+}
+
+func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to be a directory")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_a_regular_file.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_a_regular_file.go
new file mode 100644 (file)
index 0000000..e239131
--- /dev/null
@@ -0,0 +1,54 @@
+package matchers
+
+import (
+       "fmt"
+       "os"
+
+       "github.com/onsi/gomega/format"
+)
+
+type notARegularFileError struct {
+       os.FileInfo
+}
+
+func (t notARegularFileError) Error() string {
+       fileInfo := os.FileInfo(t)
+       switch {
+       case fileInfo.IsDir():
+               return "file is a directory"
+       default:
+               return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
+       }
+}
+
+type BeARegularFileMatcher struct {
+       expected interface{}
+       err      error
+}
+
+func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) {
+       actualFilename, ok := actual.(string)
+       if !ok {
+               return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path")
+       }
+
+       fileInfo, err := os.Stat(actualFilename)
+       if err != nil {
+               matcher.err = err
+               return false, nil
+       }
+
+       if !fileInfo.Mode().IsRegular() {
+               matcher.err = notARegularFileError{fileInfo}
+               return false, nil
+       }
+       return true, nil
+}
+
+func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err))
+}
+
+func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to be a regular file")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_an_existing_file.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_an_existing_file.go
new file mode 100644 (file)
index 0000000..d42eba2
--- /dev/null
@@ -0,0 +1,38 @@
+package matchers
+
+import (
+       "fmt"
+       "os"
+
+       "github.com/onsi/gomega/format"
+)
+
+type BeAnExistingFileMatcher struct {
+       expected interface{}
+}
+
+func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) {
+       actualFilename, ok := actual.(string)
+       if !ok {
+               return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path")
+       }
+
+       if _, err = os.Stat(actualFilename); err != nil {
+               switch {
+               case os.IsNotExist(err):
+                       return false, nil
+               default:
+                       return false, err
+               }
+       }
+
+       return true, nil
+}
+
+func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, fmt.Sprintf("to exist"))
+}
+
+func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, fmt.Sprintf("not to exist"))
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_closed_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_closed_matcher.go
new file mode 100644 (file)
index 0000000..c1b4995
--- /dev/null
@@ -0,0 +1,45 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "reflect"
+)
+
+type BeClosedMatcher struct {
+}
+
+func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isChan(actual) {
+               return false, fmt.Errorf("BeClosed matcher expects a channel.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       channelType := reflect.TypeOf(actual)
+       channelValue := reflect.ValueOf(actual)
+
+       if channelType.ChanDir() == reflect.SendDir {
+               return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       winnerIndex, _, open := reflect.Select([]reflect.SelectCase{
+               reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue},
+               reflect.SelectCase{Dir: reflect.SelectDefault},
+       })
+
+       var closed bool
+       if winnerIndex == 0 {
+               closed = !open
+       } else if winnerIndex == 1 {
+               closed = false
+       }
+
+       return closed, nil
+}
+
+func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to be closed")
+}
+
+func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to be open")
+}
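Because the matcher performs a non-blocking receive (the reflect.Select above), checking an open channel may consume a pending value. A brief usage sketch:

    package example_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestBeClosed(t *testing.T) {
        RegisterTestingT(t)

        c := make(chan struct{})
        Ω(c).ShouldNot(BeClosed()) // non-blocking receive hits the default case: still open

        close(c)
        Ω(c).Should(BeClosed()) // receive now succeeds with ok == false: closed
    }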
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_empty_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_empty_matcher.go
new file mode 100644 (file)
index 0000000..55bdd7d
--- /dev/null
@@ -0,0 +1,26 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+)
+
+type BeEmptyMatcher struct {
+}
+
+func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
+       length, ok := lengthOf(actual)
+       if !ok {
+               return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       return length == 0, nil
+}
+
+func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to be empty")
+}
+
+func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to be empty")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
new file mode 100644 (file)
index 0000000..32a0c31
--- /dev/null
@@ -0,0 +1,33 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "reflect"
+)
+
+type BeEquivalentToMatcher struct {
+       Expected interface{}
+}
+
+func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) {
+       if actual == nil && matcher.Expected == nil {
+               return false, fmt.Errorf("Both actual and expected must not be nil.")
+       }
+
+       convertedActual := actual
+
+       if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) {
+               convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface()
+       }
+
+       return reflect.DeepEqual(convertedActual, matcher.Expected), nil
+}
+
+func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to be equivalent to", matcher.Expected)
+}
+
+func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to be equivalent to", matcher.Expected)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_false_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_false_matcher.go
new file mode 100644 (file)
index 0000000..0b224cb
--- /dev/null
@@ -0,0 +1,25 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+)
+
+type BeFalseMatcher struct {
+}
+
+func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isBool(actual) {
+               return false, fmt.Errorf("Expected a boolean.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       return actual == false, nil
+}
+
+func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to be false")
+}
+
+func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to be false")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_nil_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_nil_matcher.go
new file mode 100644 (file)
index 0000000..7ee84fe
--- /dev/null
@@ -0,0 +1,18 @@
+package matchers
+
+import "github.com/onsi/gomega/format"
+
+type BeNilMatcher struct {
+}
+
+func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
+       return isNil(actual), nil
+}
+
+func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to be nil")
+}
+
+func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to be nil")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_numerically_matcher.go
new file mode 100644 (file)
index 0000000..52f83fe
--- /dev/null
@@ -0,0 +1,119 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "math"
+)
+
+type BeNumericallyMatcher struct {
+       Comparator string
+       CompareTo  []interface{}
+}
+
+func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0])
+}
+
+func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0])
+}
+
+func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
+       if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
+               return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments.  Got:\n%s", format.Object(matcher.CompareTo, 1))
+       }
+       if !isNumber(actual) {
+               return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(actual, 1))
+       }
+       if !isNumber(matcher.CompareTo[0]) {
+               return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(matcher.CompareTo[0], 1))
+       }
+       if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) {
+               return false, fmt.Errorf("Expected a number.  Got:\n%s", format.Object(matcher.CompareTo[1], 1))
+       }
+
+       switch matcher.Comparator {
+       case "==", "~", ">", ">=", "<", "<=":
+       default:
+               return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
+       }
+
+       if isFloat(actual) || isFloat(matcher.CompareTo[0]) {
+               var secondOperand float64 = 1e-8
+               if len(matcher.CompareTo) == 2 {
+                       secondOperand = toFloat(matcher.CompareTo[1])
+               }
+               success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand)
+       } else if isInteger(actual) {
+               var secondOperand int64 = 0
+               if len(matcher.CompareTo) == 2 {
+                       secondOperand = toInteger(matcher.CompareTo[1])
+               }
+               success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand)
+       } else if isUnsignedInteger(actual) {
+               var secondOperand uint64 = 0
+               if len(matcher.CompareTo) == 2 {
+                       secondOperand = toUnsignedInteger(matcher.CompareTo[1])
+               }
+               success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand)
+       } else {
+               return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1))
+       }
+
+       return success, nil
+}
+
+func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) {
+       switch matcher.Comparator {
+       case "==", "~":
+               diff := actual - compareTo
+               return -threshold <= diff && diff <= threshold
+       case ">":
+               return (actual > compareTo)
+       case ">=":
+               return (actual >= compareTo)
+       case "<":
+               return (actual < compareTo)
+       case "<=":
+               return (actual <= compareTo)
+       }
+       return false
+}
+
+func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) {
+       switch matcher.Comparator {
+       case "==", "~":
+               if actual < compareTo {
+                       actual, compareTo = compareTo, actual
+               }
+               return actual-compareTo <= threshold
+       case ">":
+               return (actual > compareTo)
+       case ">=":
+               return (actual >= compareTo)
+       case "<":
+               return (actual < compareTo)
+       case "<=":
+               return (actual <= compareTo)
+       }
+       return false
+}
+
+func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) {
+       switch matcher.Comparator {
+       case "~":
+               return math.Abs(actual-compareTo) <= threshold
+       case "==":
+               return (actual == compareTo)
+       case ">":
+               return (actual > compareTo)
+       case ">=":
+               return (actual >= compareTo)
+       case "<":
+               return (actual < compareTo)
+       case "<=":
+               return (actual <= compareTo)
+       }
+       return false
+}
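A brief sketch of the threshold handling above: "~" accepts an optional third argument, defaulting to 1e-8 on the float path and 0 on the integer paths. The test name and values are invented for illustration:

    package example_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestBeNumericallyThresholds(t *testing.T) {
        RegisterTestingT(t)

        Ω(1.0000000001).Should(BeNumerically("~", 1.0))     // float path, default threshold 1e-8
        Ω(102).Should(BeNumerically("~", 100, 5))           // integer path, |102-100| <= 5
        Ω(uint64(7)).Should(BeNumerically(">=", uint64(7))) // unsigned-integer path
    }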
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_sent_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_sent_matcher.go
new file mode 100644 (file)
index 0000000..d7c3223
--- /dev/null
@@ -0,0 +1,71 @@
+package matchers
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/gomega/format"
+)
+
+type BeSentMatcher struct {
+       Arg           interface{}
+       channelClosed bool
+}
+
+func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isChan(actual) {
+               return false, fmt.Errorf("BeSent expects a channel.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       channelType := reflect.TypeOf(actual)
+       channelValue := reflect.ValueOf(actual)
+
+       if channelType.ChanDir() == reflect.RecvDir {
+               return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       argType := reflect.TypeOf(matcher.Arg)
+       assignable := argType.AssignableTo(channelType.Elem())
+
+       if !assignable {
+               return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1))
+       }
+
+       argValue := reflect.ValueOf(matcher.Arg)
+
+       defer func() {
+               if e := recover(); e != nil {
+                       success = false
+                       err = fmt.Errorf("Cannot send to a closed channel")
+                       matcher.channelClosed = true
+               }
+       }()
+
+       winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{
+               reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue},
+               reflect.SelectCase{Dir: reflect.SelectDefault},
+       })
+
+       var didSend bool
+       if winnerIndex == 0 {
+               didSend = true
+       }
+
+       return didSend, nil
+}
+
+func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to send:", matcher.Arg)
+}
+
+func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to send:", matcher.Arg)
+}
+
+func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+       if !isChan(actual) {
+               return false
+       }
+
+       return !matcher.channelClosed
+}
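A brief usage sketch of the non-blocking send above; the receiving goroutine and channel names are invented for illustration:

    package example_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestBeSent(t *testing.T) {
        RegisterTestingT(t)

        c := make(chan string) // unbuffered: a bare send would block

        received := make(chan string, 1)
        go func() { received <- <-c }()

        // Each poll attempts a non-blocking send; it succeeds once the goroutine is ready to receive.
        Eventually(c).Should(BeSent("foo"))
        Eventually(received).Should(Receive(Equal("foo")))
    }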
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_temporally_matcher.go
new file mode 100644 (file)
index 0000000..abda4eb
--- /dev/null
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "time"
+)
+
+type BeTemporallyMatcher struct {
+       Comparator string
+       CompareTo  time.Time
+       Threshold  []time.Duration
+}
+
+func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo)
+}
+
+func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo)
+}
+
+func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) {
+       // predicate to test for time.Time type
+       isTime := func(t interface{}) bool {
+               _, ok := t.(time.Time)
+               return ok
+       }
+
+       if !isTime(actual) {
+               return false, fmt.Errorf("Expected a time.Time.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       switch matcher.Comparator {
+       case "==", "~", ">", ">=", "<", "<=":
+       default:
+               return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
+       }
+
+       var threshold = time.Millisecond
+       if len(matcher.Threshold) == 1 {
+               threshold = matcher.Threshold[0]
+       }
+
+       return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil
+}
+
+func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) {
+       switch matcher.Comparator {
+       case "==":
+               return actual.Equal(compareTo)
+       case "~":
+               diff := actual.Sub(compareTo)
+               return -threshold <= diff && diff <= threshold
+       case ">":
+               return actual.After(compareTo)
+       case ">=":
+               return !actual.Before(compareTo)
+       case "<":
+               return actual.Before(compareTo)
+       case "<=":
+               return !actual.After(compareTo)
+       }
+       return false
+}
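A brief usage sketch; note the default threshold of time.Millisecond applied above when "~" is given no explicit threshold:

    package example_test

    import (
        "testing"
        "time"

        . "github.com/onsi/gomega"
    )

    func TestBeTemporally(t *testing.T) {
        RegisterTestingT(t)

        t0 := time.Now()

        Ω(t0.Add(time.Hour)).Should(BeTemporally(">", t0))
        Ω(t0.Add(200 * time.Microsecond)).Should(BeTemporally("~", t0))        // within the default 1ms
        Ω(t0.Add(2 * time.Second)).Should(BeTemporally("~", t0, 5*time.Second)) // explicit threshold
    }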
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_true_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_true_matcher.go
new file mode 100644 (file)
index 0000000..1275e5f
--- /dev/null
@@ -0,0 +1,25 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+)
+
+type BeTrueMatcher struct {
+}
+
+func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isBool(actual) {
+               return false, fmt.Errorf("Expected a boolean.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       return actual.(bool), nil
+}
+
+func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to be true")
+}
+
+func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to be true")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_zero_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/be_zero_matcher.go
new file mode 100644 (file)
index 0000000..b39c914
--- /dev/null
@@ -0,0 +1,27 @@
+package matchers
+
+import (
+       "github.com/onsi/gomega/format"
+       "reflect"
+)
+
+type BeZeroMatcher struct {
+}
+
+func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
+       if actual == nil {
+               return true, nil
+       }
+       zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
+
+       return reflect.DeepEqual(zeroValue, actual), nil
+
+}
+
+func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to be zero-valued")
+}
+
+func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to be zero-valued")
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/consist_of.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/consist_of.go
new file mode 100644 (file)
index 0000000..7b0e088
--- /dev/null
@@ -0,0 +1,80 @@
+package matchers
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/gomega/format"
+       "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
+)
+
+type ConsistOfMatcher struct {
+       Elements []interface{}
+}
+
+func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isArrayOrSlice(actual) && !isMap(actual) {
+               return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       elements := matcher.Elements
+       if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) {
+               elements = []interface{}{}
+               value := reflect.ValueOf(matcher.Elements[0])
+               for i := 0; i < value.Len(); i++ {
+                       elements = append(elements, value.Index(i).Interface())
+               }
+       }
+
+       matchers := []interface{}{}
+       for _, element := range elements {
+               matcher, isMatcher := element.(omegaMatcher)
+               if !isMatcher {
+                       matcher = &EqualMatcher{Expected: element}
+               }
+               matchers = append(matchers, matcher)
+       }
+
+       values := matcher.valuesOf(actual)
+
+       if len(values) != len(matchers) {
+               return false, nil
+       }
+
+       neighbours := func(v, m interface{}) (bool, error) {
+               match, err := m.(omegaMatcher).Match(v)
+               return match && err == nil, nil
+       }
+
+       bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours)
+       if err != nil {
+               return false, err
+       }
+
+       return len(bipartiteGraph.LargestMatching()) == len(values), nil
+}
+
+func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} {
+       value := reflect.ValueOf(actual)
+       values := []interface{}{}
+       if isMap(actual) {
+               keys := value.MapKeys()
+               for i := 0; i < value.Len(); i++ {
+                       values = append(values, value.MapIndex(keys[i]).Interface())
+               }
+       } else {
+               for i := 0; i < value.Len(); i++ {
+                       values = append(values, value.Index(i).Interface())
+               }
+       }
+
+       return values
+}
+
+func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to consist of", matcher.Elements)
+}
+
+func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to consist of", matcher.Elements)
+}
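The matcher above pairs each actual element with exactly one expected element (or matcher) via a bipartite matching, which is what makes ConsistOf order-independent. A brief usage sketch, including the single-slice expansion rule:

    package example_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestConsistOf(t *testing.T) {
        RegisterTestingT(t)

        actual := []string{"Foo", "FooBar"}

        Ω(actual).Should(ConsistOf("FooBar", "Foo"))                // order does not matter
        Ω(actual).Should(ConsistOf(ContainSubstring("Bar"), "Foo")) // values and matchers may be mixed
        Ω(actual).Should(ConsistOf([]string{"FooBar", "Foo"}))      // a lone slice is expanded into its elements
    }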
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/contain_element_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/contain_element_matcher.go
new file mode 100644 (file)
index 0000000..4159335
--- /dev/null
@@ -0,0 +1,56 @@
+package matchers
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/gomega/format"
+)
+
+type ContainElementMatcher struct {
+       Element interface{}
+}
+
+func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isArrayOrSlice(actual) && !isMap(actual) {
+               return false, fmt.Errorf("ContainElement matcher expects an array/slice/map.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+       if !elementIsMatcher {
+               elemMatcher = &EqualMatcher{Expected: matcher.Element}
+       }
+
+       value := reflect.ValueOf(actual)
+       var keys []reflect.Value
+       if isMap(actual) {
+               keys = value.MapKeys()
+       }
+       var lastError error
+       for i := 0; i < value.Len(); i++ {
+               var success bool
+               var err error
+               if isMap(actual) {
+                       success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface())
+               } else {
+                       success, err = elemMatcher.Match(value.Index(i).Interface())
+               }
+               if err != nil {
+                       lastError = err
+                       continue
+               }
+               if success {
+                       return true, nil
+               }
+       }
+
+       return false, lastError
+}
+
+func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/contain_substring_matcher.go
new file mode 100644 (file)
index 0000000..2e76089
--- /dev/null
@@ -0,0 +1,37 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "strings"
+)
+
+type ContainSubstringMatcher struct {
+       Substr string
+       Args   []interface{}
+}
+
+func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
+       actualString, ok := toString(actual)
+       if !ok {
+               return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       return strings.Contains(actualString, matcher.stringToMatch()), nil
+}
+
+func (matcher *ContainSubstringMatcher) stringToMatch() string {
+       stringToMatch := matcher.Substr
+       if len(matcher.Args) > 0 {
+               stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...)
+       }
+       return stringToMatch
+}
+
+func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to contain substring", matcher.stringToMatch())
+}
+
+func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to contain substring", matcher.stringToMatch())
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/equal_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/equal_matcher.go
new file mode 100644 (file)
index 0000000..d186597
--- /dev/null
@@ -0,0 +1,27 @@
+package matchers
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/gomega/format"
+)
+
+type EqualMatcher struct {
+       Expected interface{}
+}
+
+func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) {
+       if actual == nil && matcher.Expected == nil {
+               return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead.  This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+       }
+       return reflect.DeepEqual(actual, matcher.Expected), nil
+}
+
+func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to equal", matcher.Expected)
+}
+
+func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to equal", matcher.Expected)
+}
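A brief sketch contrasting the strict reflect.DeepEqual comparison above with BeEquivalentTo's type conversion; the test name is invented for illustration:

    package example_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestEqualVersusBeEquivalentTo(t *testing.T) {
        RegisterTestingT(t)

        // Equal uses reflect.DeepEqual, so values of different numeric types never match.
        Ω(int32(5)).ShouldNot(Equal(int64(5)))

        // BeEquivalentTo converts actual to expected's type before comparing.
        Ω(int32(5)).Should(BeEquivalentTo(int64(5)))
    }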
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_key_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_key_matcher.go
new file mode 100644 (file)
index 0000000..5701ba6
--- /dev/null
@@ -0,0 +1,53 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "reflect"
+)
+
+type HaveKeyMatcher struct {
+       Key interface{}
+}
+
+func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isMap(actual) {
+               return false, fmt.Errorf("HaveKey matcher expects a map.  Got:%s", format.Object(actual, 1))
+       }
+
+       keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
+       if !keyIsMatcher {
+               keyMatcher = &EqualMatcher{Expected: matcher.Key}
+       }
+
+       keys := reflect.ValueOf(actual).MapKeys()
+       for i := 0; i < len(keys); i++ {
+               success, err := keyMatcher.Match(keys[i].Interface())
+               if err != nil {
+                       return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error())
+               }
+               if success {
+                       return true, nil
+               }
+       }
+
+       return false, nil
+}
+
+func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) {
+       switch matcher.Key.(type) {
+       case omegaMatcher:
+               return format.Message(actual, "to have key matching", matcher.Key)
+       default:
+               return format.Message(actual, "to have key", matcher.Key)
+       }
+}
+
+func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       switch matcher.Key.(type) {
+       case omegaMatcher:
+               return format.Message(actual, "not to have key matching", matcher.Key)
+       default:
+               return format.Message(actual, "not to have key", matcher.Key)
+       }
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
new file mode 100644 (file)
index 0000000..464ac18
--- /dev/null
@@ -0,0 +1,73 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "reflect"
+)
+
+type HaveKeyWithValueMatcher struct {
+       Key   interface{}
+       Value interface{}
+}
+
+func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isMap(actual) {
+               return false, fmt.Errorf("HaveKeyWithValue matcher expects a map.  Got:%s", format.Object(actual, 1))
+       }
+
+       keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
+       if !keyIsMatcher {
+               keyMatcher = &EqualMatcher{Expected: matcher.Key}
+       }
+
+       valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher)
+       if !valueIsMatcher {
+               valueMatcher = &EqualMatcher{Expected: matcher.Value}
+       }
+
+       keys := reflect.ValueOf(actual).MapKeys()
+       for i := 0; i < len(keys); i++ {
+               success, err := keyMatcher.Match(keys[i].Interface())
+               if err != nil {
+                       return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error())
+               }
+               if success {
+                       actualValue := reflect.ValueOf(actual).MapIndex(keys[i])
+                       success, err := valueMatcher.Match(actualValue.Interface())
+                       if err != nil {
+                               return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error())
+                       }
+                       return success, nil
+               }
+       }
+
+       return false, nil
+}
+
+func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) {
+       str := "to have {key: value}"
+       if _, ok := matcher.Key.(omegaMatcher); ok {
+               str += " matching"
+       } else if _, ok := matcher.Value.(omegaMatcher); ok {
+               str += " matching"
+       }
+
+       expect := make(map[interface{}]interface{}, 1)
+       expect[matcher.Key] = matcher.Value
+       return format.Message(actual, str, expect)
+}
+
+func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       kStr := "not to have key"
+       if _, ok := matcher.Key.(omegaMatcher); ok {
+               kStr = "not to have key matching"
+       }
+
+       vStr := "or that key's value not be"
+       if _, ok := matcher.Value.(omegaMatcher); ok {
+               vStr = "or to have that key's value not matching"
+       }
+
+       return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value)
+}
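A brief usage sketch of the matcher above; both the key and the value may be literal values or nested matchers. The map contents are invented for illustration:

    package example_test

    import (
        "testing"

        . "github.com/onsi/gomega"
    )

    func TestHaveKeyWithValue(t *testing.T) {
        RegisterTestingT(t)

        m := map[string]string{"Foo": "Bar", "BazFoo": "Duck"}

        Ω(m).Should(HaveKeyWithValue("Foo", "Bar"))
        // The key matcher selects "BazFoo"; that key's value must then satisfy HavePrefix.
        Ω(m).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), HavePrefix("Du")))
    }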
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_len_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_len_matcher.go
new file mode 100644 (file)
index 0000000..a183775
--- /dev/null
@@ -0,0 +1,27 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+)
+
+type HaveLenMatcher struct {
+       Count int
+}
+
+func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
+       length, ok := lengthOf(actual)
+       if !ok {
+               return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       return length == matcher.Count, nil
+}
+
+func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) {
+       return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count)
+}
+
+func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count)
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_occurred_matcher.go
new file mode 100644 (file)
index 0000000..cdc1d54
--- /dev/null
@@ -0,0 +1,30 @@
+package matchers
+
+import (
+       "fmt"
+
+       "github.com/onsi/gomega/format"
+)
+
+type HaveOccurredMatcher struct {
+}
+
+func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) {
+       if isNil(actual) {
+               return false, nil
+       }
+
+       if isError(actual) {
+               return true, nil
+       }
+
+       return false, fmt.Errorf("Expected an error.  Got:\n%s", format.Object(actual, 1))
+}
+
+func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) {
+       return fmt.Sprintf("Expected an error to have occurred.  Got:\n%s", format.Object(actual, 1))
+}
+
+func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred")
+}
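
A quick sketch of the intended behaviour (illustrative, not part of the changeset): HaveOccurredMatcher treats any non-nil error as a match, a nil error as a non-match, and anything that is not an error as a matcher error.

    package main

    import (
        "errors"
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        m := &matchers.HaveOccurredMatcher{}

        ok, _ := m.Match(errors.New("no route to host"))
        fmt.Println(ok) // true: a non-nil error has occurred

        var err error // nil: nothing went wrong
        ok, _ = m.Match(err)
        fmt.Println(ok) // false
    }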
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_prefix_matcher.go
new file mode 100644 (file)
index 0000000..8b63a89
--- /dev/null
@@ -0,0 +1,35 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+)
+
+type HavePrefixMatcher struct {
+       Prefix string
+       Args   []interface{}
+}
+
+func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) {
+       actualString, ok := toString(actual)
+       if !ok {
+               return false, fmt.Errorf("HavePrefix matcher requires a string or stringer.  Got:\n%s", format.Object(actual, 1))
+       }
+       prefix := matcher.prefix()
+       return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil
+}
+
+func (matcher *HavePrefixMatcher) prefix() string {
+       if len(matcher.Args) > 0 {
+               return fmt.Sprintf(matcher.Prefix, matcher.Args...)
+       }
+       return matcher.Prefix
+}
+
+func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to have prefix", matcher.prefix())
+}
+
+func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to have prefix", matcher.prefix())
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/have_suffix_matcher.go
new file mode 100644 (file)
index 0000000..eb1b284
--- /dev/null
@@ -0,0 +1,35 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+)
+
+type HaveSuffixMatcher struct {
+       Suffix string
+       Args   []interface{}
+}
+
+func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) {
+       actualString, ok := toString(actual)
+       if !ok {
+               return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer.  Got:\n%s", format.Object(actual, 1))
+       }
+       suffix := matcher.suffix()
+       return len(actualString) >= len(suffix) && actualString[len(actualString) - len(suffix):] == suffix, nil
+}
+
+func (matcher *HaveSuffixMatcher) suffix() string {
+       if len(matcher.Args) > 0 {
+               return fmt.Sprintf(matcher.Suffix, matcher.Args...)
+       }
+       return matcher.Suffix
+}
+
+func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to have suffix", matcher.suffix())
+}
+
+func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to have suffix", matcher.suffix())
+}
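
Sketch covering both HavePrefixMatcher and HaveSuffixMatcher (illustrative, not part of the changeset): when Args is set, Prefix/Suffix is treated as a printf-style format and expanded first. The interface and file names are invented.

    package main

    import (
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        // Args, when set, are applied to Prefix as a printf format first.
        pre := &matchers.HavePrefixMatcher{Prefix: "veth%d", Args: []interface{}{0}}
        ok, _ := pre.Match("veth0abc123")
        fmt.Println(ok) // true

        suf := &matchers.HaveSuffixMatcher{Suffix: ".conf"}
        ok, _ = suf.Match("10-bridge.conf")
        fmt.Println(ok) // true
    }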
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_error_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_error_matcher.go
new file mode 100644 (file)
index 0000000..03cdf04
--- /dev/null
@@ -0,0 +1,50 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "reflect"
+)
+
+type MatchErrorMatcher struct {
+       Expected interface{}
+}
+
+func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) {
+       if isNil(actual) {
+               return false, fmt.Errorf("Expected an error, got nil")
+       }
+
+       if !isError(actual) {
+               return false, fmt.Errorf("Expected an error.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       actualErr := actual.(error)
+
+       if isString(matcher.Expected) {
+               return reflect.DeepEqual(actualErr.Error(), matcher.Expected), nil
+       }
+
+       if isError(matcher.Expected) {
+               return reflect.DeepEqual(actualErr, matcher.Expected), nil
+       }
+
+       var subMatcher omegaMatcher
+       var hasSubMatcher bool
+       if matcher.Expected != nil {
+               subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher)
+               if hasSubMatcher {
+                       return subMatcher.Match(actualErr.Error())
+               }
+       }
+
+       return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings.  Got:\n%s", format.Object(matcher.Expected, 1))
+}
+
+func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to match error", matcher.Expected)
+}
+
+func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to match error", matcher.Expected)
+}
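
Illustrative sketch (not part of this diff): MatchErrorMatcher compares the actual error against an expected message string, another error value (via reflect.DeepEqual), or a nested string matcher. The error text is invented.

    package main

    import (
        "errors"
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        actual := errors.New("interface not found")

        // Compare against the error message...
        byString := &matchers.MatchErrorMatcher{Expected: "interface not found"}
        ok, _ := byString.Match(actual)
        fmt.Println(ok) // true

        // ...or against another error value, compared with reflect.DeepEqual.
        byError := &matchers.MatchErrorMatcher{Expected: errors.New("interface not found")}
        ok, _ = byError.Match(actual)
        fmt.Println(ok) // true
    }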
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_json_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_json_matcher.go
new file mode 100644 (file)
index 0000000..efc5e15
--- /dev/null
@@ -0,0 +1,61 @@
+package matchers
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "reflect"
+)
+
+type MatchJSONMatcher struct {
+       JSONToMatch interface{}
+}
+
+func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) {
+       actualString, expectedString, err := matcher.prettyPrint(actual)
+       if err != nil {
+               return false, err
+       }
+
+       var aval interface{}
+       var eval interface{}
+
+       // this is guarded by prettyPrint
+       json.Unmarshal([]byte(actualString), &aval)
+       json.Unmarshal([]byte(expectedString), &eval)
+
+       return reflect.DeepEqual(aval, eval), nil
+}
+
+func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) {
+       actualString, expectedString, _ := matcher.prettyPrint(actual)
+       return format.Message(actualString, "to match JSON of", expectedString)
+}
+
+func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       actualString, expectedString, _ := matcher.prettyPrint(actual)
+       return format.Message(actualString, "not to match JSON of", expectedString)
+}
+
+func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+       actualString, aok := toString(actual)
+       expectedString, eok := toString(matcher.JSONToMatch)
+
+       if !(aok && eok) {
+               return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string or stringer.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       abuf := new(bytes.Buffer)
+       ebuf := new(bytes.Buffer)
+
+       if err := json.Indent(abuf, []byte(actualString), "", "  "); err != nil {
+               return "", "", err
+       }
+
+       if err := json.Indent(ebuf, []byte(expectedString), "", "  "); err != nil {
+               return "", "", err
+       }
+
+       return abuf.String(), ebuf.String(), nil
+}
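
Illustrative sketch (not part of this diff): MatchJSONMatcher unmarshals both documents and compares them with reflect.DeepEqual, so key order and whitespace do not matter. The JSON is invented.

    package main

    import (
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        m := &matchers.MatchJSONMatcher{JSONToMatch: `{"name": "bridge", "mtu": 1500}`}

        // Key order and whitespace are irrelevant.
        ok, err := m.Match(`{"mtu": 1500, "name": "bridge"}`)
        fmt.Println(ok, err) // true <nil>
    }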
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/match_regexp_matcher.go
new file mode 100644 (file)
index 0000000..7ca79a1
--- /dev/null
@@ -0,0 +1,42 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "regexp"
+)
+
+type MatchRegexpMatcher struct {
+       Regexp string
+       Args   []interface{}
+}
+
+func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) {
+       actualString, ok := toString(actual)
+       if !ok {
+               return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1))
+       }
+
+       match, err := regexp.Match(matcher.regexp(), []byte(actualString))
+       if err != nil {
+               return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error())
+       }
+
+       return match, nil
+}
+
+func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to match regular expression", matcher.regexp())
+}
+
+func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to match regular expression", matcher.regexp())
+}
+
+func (matcher *MatchRegexpMatcher) regexp() string {
+       re := matcher.Regexp
+       if len(matcher.Args) > 0 {
+               re = fmt.Sprintf(matcher.Regexp, matcher.Args...)
+       }
+       return re
+}
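
Illustrative sketch (not part of this diff): MatchRegexpMatcher compiles Regexp, after applying optional printf-style Args, and matches it against the stringified actual. The subnet value is invented.

    package main

    import (
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        // Args, when set, are expanded into Regexp before it is compiled.
        m := &matchers.MatchRegexpMatcher{Regexp: `^10\.%d\.\d+\.\d+$`, Args: []interface{}{88}}
        ok, err := m.Match("10.88.0.2")
        fmt.Println(ok, err) // true <nil>
    }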
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/not.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/not.go
new file mode 100644 (file)
index 0000000..2c91670
--- /dev/null
@@ -0,0 +1,30 @@
+package matchers
+
+import (
+       "github.com/onsi/gomega/internal/oraclematcher"
+       "github.com/onsi/gomega/types"
+)
+
+type NotMatcher struct {
+       Matcher types.GomegaMatcher
+}
+
+func (m *NotMatcher) Match(actual interface{}) (bool, error) {
+       success, err := m.Matcher.Match(actual)
+       if err != nil {
+               return false, err
+       }
+       return !success, nil
+}
+
+func (m *NotMatcher) FailureMessage(actual interface{}) (message string) {
+       return m.Matcher.NegatedFailureMessage(actual) // works beautifully
+}
+
+func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return m.Matcher.FailureMessage(actual) // works beautifully
+}
+
+func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+       return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
+}
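
Brief sketch (illustrative, not part of the changeset): NotMatcher simply inverts the wrapped matcher and swaps its failure messages.

    package main

    import (
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        notEmpty := &matchers.NotMatcher{Matcher: &matchers.HaveLenMatcher{Count: 0}}
        ok, _ := notEmpty.Match([]string{"eth0"})
        fmt.Println(ok) // true: the slice is not empty
    }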
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/or.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/or.go
new file mode 100644 (file)
index 0000000..3bf7998
--- /dev/null
@@ -0,0 +1,67 @@
+package matchers
+
+import (
+       "fmt"
+
+       "github.com/onsi/gomega/format"
+       "github.com/onsi/gomega/internal/oraclematcher"
+       "github.com/onsi/gomega/types"
+)
+
+type OrMatcher struct {
+       Matchers []types.GomegaMatcher
+
+       // state
+       firstSuccessfulMatcher types.GomegaMatcher
+}
+
+func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
+       m.firstSuccessfulMatcher = nil
+       for _, matcher := range m.Matchers {
+               success, err := matcher.Match(actual)
+               if err != nil {
+                       return false, err
+               }
+               if success {
+                       m.firstSuccessfulMatcher = matcher
+                       return true, nil
+               }
+       }
+       return false, nil
+}
+
+func (m *OrMatcher) FailureMessage(actual interface{}) (message string) {
+       // not the most beautiful list of matchers, but not bad either...
+       return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers))
+}
+
+func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return m.firstSuccessfulMatcher.NegatedFailureMessage(actual)
+}
+
+func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+       /*
+               Example with 3 matchers: A, B, C
+
+               Match evaluates them: F, T, <?>  => T
+               So match is currently T, what should MatchMayChangeInTheFuture() return?
+               Seems like it only depends on B, since currently B MUST change to allow the result to become F
+
+               Match eval: F, F, F  => F
+               So match is currently F, what should MatchMayChangeInTheFuture() return?
+               Seems to depend on ANY of them being able to change to T.
+       */
+
+       if m.firstSuccessfulMatcher != nil {
+               // one of the matchers succeeded.. it must be able to change in order to affect the result
+               return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual)
+       } else {
+               // so all matchers failed.. Any one of them changing would change the result.
+               for _, matcher := range m.Matchers {
+                       if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+                               return true
+                       }
+               }
+                       return false // none of them were going to change
+       }
+}
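
Sketch of OrMatcher in use (illustrative, not part of the changeset): it succeeds on the first sub-matcher that matches and remembers it so the negated failure message can name the one that fired. The interface names are invented.

    package main

    import (
        "fmt"

        "github.com/onsi/gomega/matchers"
        "github.com/onsi/gomega/types"
    )

    func main() {
        m := &matchers.OrMatcher{Matchers: []types.GomegaMatcher{
            &matchers.HavePrefixMatcher{Prefix: "eth"},
            &matchers.HavePrefixMatcher{Prefix: "lo"},
        }}
        ok, _ := m.Match("lo0")
        fmt.Println(ok) // true: the second sub-matcher fires
    }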
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/panic_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/panic_matcher.go
new file mode 100644 (file)
index 0000000..75ab251
--- /dev/null
@@ -0,0 +1,42 @@
+package matchers
+
+import (
+       "fmt"
+       "github.com/onsi/gomega/format"
+       "reflect"
+)
+
+type PanicMatcher struct{}
+
+func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) {
+       if actual == nil {
+               return false, fmt.Errorf("PanicMatcher expects a non-nil actual.")
+       }
+
+       actualType := reflect.TypeOf(actual)
+       if actualType.Kind() != reflect.Func {
+               return false, fmt.Errorf("PanicMatcher expects a function.  Got:\n%s", format.Object(actual, 1))
+       }
+       if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) {
+               return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       success = false
+       defer func() {
+               if e := recover(); e != nil {
+                       success = true
+               }
+       }()
+
+       reflect.ValueOf(actual).Call([]reflect.Value{})
+
+       return
+}
+
+func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "to panic")
+}
+
+func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return format.Message(actual, "not to panic")
+}
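
Illustrative sketch (not part of this diff): PanicMatcher requires a zero-argument, zero-return function; Match invokes it and recovers any panic.

    package main

    import (
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        m := &matchers.PanicMatcher{}

        ok, _ := m.Match(func() { panic("missing network config") })
        fmt.Println(ok) // true

        ok, _ = m.Match(func() {})
        fmt.Println(ok) // false
    }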
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/receive_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/receive_matcher.go
new file mode 100644 (file)
index 0000000..7a8c2cd
--- /dev/null
@@ -0,0 +1,126 @@
+package matchers
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/gomega/format"
+)
+
+type ReceiveMatcher struct {
+       Arg           interface{}
+       receivedValue reflect.Value
+       channelClosed bool
+}
+
+func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
+       if !isChan(actual) {
+               return false, fmt.Errorf("ReceiveMatcher expects a channel.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       channelType := reflect.TypeOf(actual)
+       channelValue := reflect.ValueOf(actual)
+
+       if channelType.ChanDir() == reflect.SendDir {
+               return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel.  Got:\n%s", format.Object(actual, 1))
+       }
+
+       var subMatcher omegaMatcher
+       var hasSubMatcher bool
+
+       if matcher.Arg != nil {
+               subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher)
+               if !hasSubMatcher {
+                       argType := reflect.TypeOf(matcher.Arg)
+                       if argType.Kind() != reflect.Ptr {
+                               return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1))
+                       }
+
+                       assignable := channelType.Elem().AssignableTo(argType.Elem())
+                       if !assignable {
+                               return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(matcher.Arg, 1))
+                       }
+               }
+       }
+
+       winnerIndex, value, open := reflect.Select([]reflect.SelectCase{
+               reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue},
+               reflect.SelectCase{Dir: reflect.SelectDefault},
+       })
+
+       var closed bool
+       var didReceive bool
+       if winnerIndex == 0 {
+               closed = !open
+               didReceive = open
+       }
+       matcher.channelClosed = closed
+
+       if closed {
+               return false, nil
+       }
+
+       if hasSubMatcher {
+               if didReceive {
+                       matcher.receivedValue = value
+                       return subMatcher.Match(matcher.receivedValue.Interface())
+               } else {
+                       return false, nil
+               }
+       }
+
+       if didReceive {
+               if matcher.Arg != nil {
+                       outValue := reflect.ValueOf(matcher.Arg)
+                       reflect.Indirect(outValue).Set(value)
+               }
+
+               return true, nil
+       } else {
+               return false, nil
+       }
+}
+
+func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
+       subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+
+       closedAddendum := ""
+       if matcher.channelClosed {
+               closedAddendum = " The channel is closed."
+       }
+
+       if hasSubMatcher {
+               if matcher.receivedValue.IsValid() {
+                       return subMatcher.FailureMessage(matcher.receivedValue.Interface())
+               }
+               return "When passed a matcher, ReceiveMatcher's channel *must* receive something."
+       } else {
+               return format.Message(actual, "to receive something."+closedAddendum)
+       }
+}
+
+func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+
+       closedAddendum := ""
+       if matcher.channelClosed {
+               closedAddendum = " The channel is closed."
+       }
+
+       if hasSubMatcher {
+               if matcher.receivedValue.IsValid() {
+                       return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface())
+               }
+               return "When passed a matcher, ReceiveMatcher's channel *must* receive something."
+       } else {
+               return format.Message(actual, "not to receive anything."+closedAddendum)
+       }
+}
+
+func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+       if !isChan(actual) {
+               return false
+       }
+
+       return !matcher.channelClosed
+}
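
Usage sketch (illustrative, not part of the changeset): ReceiveMatcher polls the channel without blocking; with a pointer Arg the received value is written through it, and a closed or empty channel simply reports false. The event string is invented.

    package main

    import (
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        events := make(chan string, 1)
        events <- "link up" // invented example event

        var got string
        m := &matchers.ReceiveMatcher{Arg: &got} // a pointer Arg receives the value
        ok, _ := m.Match(events)
        fmt.Println(ok, got) // true link up

        // Nothing else is buffered, so a second Match reports false without blocking.
        ok, _ = m.Match(events)
        fmt.Println(ok) // false
    }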
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/succeed_matcher.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/succeed_matcher.go
new file mode 100644 (file)
index 0000000..f7dd853
--- /dev/null
@@ -0,0 +1,30 @@
+package matchers
+
+import (
+       "fmt"
+
+       "github.com/onsi/gomega/format"
+)
+
+type SucceedMatcher struct {
+}
+
+func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+       if actual == nil {
+               return true, nil
+       }
+
+       if isError(actual) {
+               return false, nil
+       }
+
+       return false, fmt.Errorf("Expected an error-type.  Got:\n%s", format.Object(actual, 1))
+}
+
+func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+       return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1))
+}
+
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+       return "Expected failure, but got no error."
+}
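
Illustrative sketch (not part of this diff): SucceedMatcher is the mirror image of HaveOccurred, matching a nil error and rejecting a non-nil one. The JSON input is invented.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        m := &matchers.SucceedMatcher{}
        var conf struct{ Name string }

        // A nil error means the call succeeded.
        ok, _ := m.Match(json.Unmarshal([]byte(`{"Name": "bridge"}`), &conf))
        fmt.Println(ok) // true

        // A non-nil error means it did not.
        ok, _ = m.Match(json.Unmarshal([]byte(`not json`), &conf))
        fmt.Println(ok) // false
    }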
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE
new file mode 100644 (file)
index 0000000..8edd817
--- /dev/null
@@ -0,0 +1,20 @@
+Copyright (c) 2014 Amit Kumar Gupta
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
new file mode 100644 (file)
index 0000000..119d21e
--- /dev/null
@@ -0,0 +1,41 @@
+package bipartitegraph
+
+import "errors"
+import "fmt"
+
+import . "github.com/onsi/gomega/matchers/support/goraph/node"
+import . "github.com/onsi/gomega/matchers/support/goraph/edge"
+
+type BipartiteGraph struct {
+       Left  NodeOrderedSet
+       Right NodeOrderedSet
+       Edges EdgeSet
+}
+
+func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
+       left := NodeOrderedSet{}
+       for i, _ := range leftValues {
+               left = append(left, Node{i})
+       }
+
+       right := NodeOrderedSet{}
+       for j, _ := range rightValues {
+               right = append(right, Node{j + len(left)})
+       }
+
+       edges := EdgeSet{}
+       for i, leftValue := range leftValues {
+               for j, rightValue := range rightValues {
+                       neighbours, err := neighbours(leftValue, rightValue)
+                       if err != nil {
+                               return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error()))
+                       }
+
+                       if neighbours {
+                               edges = append(edges, Edge{left[i], right[j]})
+                       }
+               }
+       }
+
+       return &BipartiteGraph{left, right, edges}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
new file mode 100644 (file)
index 0000000..32529c5
--- /dev/null
@@ -0,0 +1,161 @@
+package bipartitegraph
+
+import . "github.com/onsi/gomega/matchers/support/goraph/node"
+import . "github.com/onsi/gomega/matchers/support/goraph/edge"
+import "github.com/onsi/gomega/matchers/support/goraph/util"
+
+func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) {
+       paths := bg.maximalDisjointSLAPCollection(matching)
+
+       for len(paths) > 0 {
+               for _, path := range paths {
+                       matching = matching.SymmetricDifference(path)
+               }
+               paths = bg.maximalDisjointSLAPCollection(matching)
+       }
+
+       return
+}
+
+func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) {
+       guideLayers := bg.createSLAPGuideLayers(matching)
+       if len(guideLayers) == 0 {
+               return
+       }
+
+       used := make(map[Node]bool)
+
+       for _, u := range guideLayers[len(guideLayers)-1] {
+               slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used)
+               if found {
+                       for _, edge := range slap {
+                               used[edge.Node1] = true
+                               used[edge.Node2] = true
+                       }
+                       result = append(result, slap)
+               }
+       }
+
+       return
+}
+
+func (bg *BipartiteGraph) findDisjointSLAP(
+       start Node,
+       matching EdgeSet,
+       guideLayers []NodeOrderedSet,
+       used map[Node]bool,
+) ([]Edge, bool) {
+       return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used)
+}
+
+func (bg *BipartiteGraph) findDisjointSLAPHelper(
+       currentNode Node,
+       currentSLAP EdgeSet,
+       currentLevel int,
+       matching EdgeSet,
+       guideLayers []NodeOrderedSet,
+       used map[Node]bool,
+) (EdgeSet, bool) {
+       used[currentNode] = true
+
+       if currentLevel == 0 {
+               return currentSLAP, true
+       }
+
+       for _, nextNode := range guideLayers[currentLevel-1] {
+               if used[nextNode] {
+                       continue
+               }
+
+               edge, found := bg.Edges.FindByNodes(currentNode, nextNode)
+               if !found {
+                       continue
+               }
+
+               if matching.Contains(edge) == util.Odd(currentLevel) {
+                       continue
+               }
+
+               currentSLAP = append(currentSLAP, edge)
+               slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used)
+               if found {
+                       return slap, true
+               }
+               currentSLAP = currentSLAP[:len(currentSLAP)-1]
+       }
+
+       used[currentNode] = false
+       return nil, false
+}
+
+func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) {
+       used := make(map[Node]bool)
+       currentLayer := NodeOrderedSet{}
+
+       for _, node := range bg.Left {
+               if matching.Free(node) {
+                       used[node] = true
+                       currentLayer = append(currentLayer, node)
+               }
+       }
+
+       if len(currentLayer) == 0 {
+               return []NodeOrderedSet{}
+       } else {
+               guideLayers = append(guideLayers, currentLayer)
+       }
+
+       done := false
+
+       for !done {
+               lastLayer := currentLayer
+               currentLayer = NodeOrderedSet{}
+
+               if util.Odd(len(guideLayers)) {
+                       for _, leftNode := range lastLayer {
+                               for _, rightNode := range bg.Right {
+                                       if used[rightNode] {
+                                               continue
+                                       }
+
+                                       edge, found := bg.Edges.FindByNodes(leftNode, rightNode)
+                                       if !found || matching.Contains(edge) {
+                                               continue
+                                       }
+
+                                       currentLayer = append(currentLayer, rightNode)
+                                       used[rightNode] = true
+
+                                       if matching.Free(rightNode) {
+                                               done = true
+                                       }
+                               }
+                       }
+               } else {
+                       for _, rightNode := range lastLayer {
+                               for _, leftNode := range bg.Left {
+                                       if used[leftNode] {
+                                               continue
+                                       }
+
+                                       edge, found := bg.Edges.FindByNodes(leftNode, rightNode)
+                                       if !found || !matching.Contains(edge) {
+                                               continue
+                                       }
+
+                                       currentLayer = append(currentLayer, leftNode)
+                                       used[leftNode] = true
+                               }
+                       }
+
+               }
+
+               if len(currentLayer) == 0 {
+                       return []NodeOrderedSet{}
+               } else {
+                       guideLayers = append(guideLayers, currentLayer)
+               }
+       }
+
+       return
+}
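
The goraph packages above are internal support code for Gomega's collection matchers. The sketch below is illustrative only and not part of the changeset; it drives the exported graph API directly with made-up values.

    package main

    import (
        "fmt"
        "strings"

        "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
    )

    func main() {
        elements := []interface{}{"eth0", "lo"} // invented example values
        patterns := []interface{}{"eth", "lo"}

        // An edge exists wherever an element starts with a pattern.
        graph, err := bipartitegraph.NewBipartiteGraph(elements, patterns,
            func(el, pattern interface{}) (bool, error) {
                return strings.HasPrefix(el.(string), pattern.(string)), nil
            })
        if err != nil {
            panic(err)
        }

        // Here every element can be paired with a distinct pattern.
        fmt.Println(len(graph.LargestMatching())) // 2
    }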
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
new file mode 100644 (file)
index 0000000..4fd15cc
--- /dev/null
@@ -0,0 +1,61 @@
+package edge
+
+import . "github.com/onsi/gomega/matchers/support/goraph/node"
+
+type Edge struct {
+       Node1 Node
+       Node2 Node
+}
+
+type EdgeSet []Edge
+
+func (ec EdgeSet) Free(node Node) bool {
+       for _, e := range ec {
+               if e.Node1 == node || e.Node2 == node {
+                       return false
+               }
+       }
+
+       return true
+}
+
+func (ec EdgeSet) Contains(edge Edge) bool {
+       for _, e := range ec {
+               if e == edge {
+                       return true
+               }
+       }
+
+       return false
+}
+
+func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) {
+       for _, e := range ec {
+               if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) {
+                       return e, true
+               }
+       }
+
+       return Edge{}, false
+}
+
+func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet {
+       edgesToInclude := make(map[Edge]bool)
+
+       for _, e := range ec {
+               edgesToInclude[e] = true
+       }
+
+       for _, e := range ec2 {
+               edgesToInclude[e] = !edgesToInclude[e]
+       }
+
+       result := EdgeSet{}
+       for e, include := range edgesToInclude {
+               if include {
+                       result = append(result, e)
+               }
+       }
+
+       return result
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/node/node.go
new file mode 100644 (file)
index 0000000..800c2ea
--- /dev/null
@@ -0,0 +1,7 @@
+package node
+
+type Node struct {
+       Id int
+}
+
+type NodeOrderedSet []Node
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/util/util.go
new file mode 100644 (file)
index 0000000..a24cd27
--- /dev/null
@@ -0,0 +1,7 @@
+package util
+
+import "math"
+
+func Odd(n int) bool {
+       return math.Mod(float64(n), 2.0) == 1.0
+}
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/type_support.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/type_support.go
new file mode 100644 (file)
index 0000000..ef9b448
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+Gomega matchers
+
+This package implements the Gomega matchers and does not typically need to be imported.
+See the docs for Gomega for documentation on the matchers
+
+http://onsi.github.io/gomega/
+*/
+package matchers
+
+import (
+       "fmt"
+       "reflect"
+)
+
+type omegaMatcher interface {
+       Match(actual interface{}) (success bool, err error)
+       FailureMessage(actual interface{}) (message string)
+       NegatedFailureMessage(actual interface{}) (message string)
+}
+
+func isBool(a interface{}) bool {
+       return reflect.TypeOf(a).Kind() == reflect.Bool
+}
+
+func isNumber(a interface{}) bool {
+       if a == nil {
+               return false
+       }
+       kind := reflect.TypeOf(a).Kind()
+       return reflect.Int <= kind && kind <= reflect.Float64
+}
+
+func isInteger(a interface{}) bool {
+       kind := reflect.TypeOf(a).Kind()
+       return reflect.Int <= kind && kind <= reflect.Int64
+}
+
+func isUnsignedInteger(a interface{}) bool {
+       kind := reflect.TypeOf(a).Kind()
+       return reflect.Uint <= kind && kind <= reflect.Uint64
+}
+
+func isFloat(a interface{}) bool {
+       kind := reflect.TypeOf(a).Kind()
+       return reflect.Float32 <= kind && kind <= reflect.Float64
+}
+
+func toInteger(a interface{}) int64 {
+       if isInteger(a) {
+               return reflect.ValueOf(a).Int()
+       } else if isUnsignedInteger(a) {
+               return int64(reflect.ValueOf(a).Uint())
+       } else if isFloat(a) {
+               return int64(reflect.ValueOf(a).Float())
+       } else {
+               panic(fmt.Sprintf("Expected a number!  Got <%T> %#v", a, a))
+       }
+}
+
+func toUnsignedInteger(a interface{}) uint64 {
+       if isInteger(a) {
+               return uint64(reflect.ValueOf(a).Int())
+       } else if isUnsignedInteger(a) {
+               return reflect.ValueOf(a).Uint()
+       } else if isFloat(a) {
+               return uint64(reflect.ValueOf(a).Float())
+       } else {
+               panic(fmt.Sprintf("Expected a number!  Got <%T> %#v", a, a))
+       }
+}
+
+func toFloat(a interface{}) float64 {
+       if isInteger(a) {
+               return float64(reflect.ValueOf(a).Int())
+       } else if isUnsignedInteger(a) {
+               return float64(reflect.ValueOf(a).Uint())
+       } else if isFloat(a) {
+               return reflect.ValueOf(a).Float()
+       } else {
+               panic(fmt.Sprintf("Expected a number!  Got <%T> %#v", a, a))
+       }
+}
+
+func isError(a interface{}) bool {
+       _, ok := a.(error)
+       return ok
+}
+
+func isChan(a interface{}) bool {
+       if isNil(a) {
+               return false
+       }
+       return reflect.TypeOf(a).Kind() == reflect.Chan
+}
+
+func isMap(a interface{}) bool {
+       if a == nil {
+               return false
+       }
+       return reflect.TypeOf(a).Kind() == reflect.Map
+}
+
+func isArrayOrSlice(a interface{}) bool {
+       if a == nil {
+               return false
+       }
+       switch reflect.TypeOf(a).Kind() {
+       case reflect.Array, reflect.Slice:
+               return true
+       default:
+               return false
+       }
+}
+
+func isString(a interface{}) bool {
+       if a == nil {
+               return false
+       }
+       return reflect.TypeOf(a).Kind() == reflect.String
+}
+
+func toString(a interface{}) (string, bool) {
+       aString, isString := a.(string)
+       if isString {
+               return aString, true
+       }
+
+       aBytes, isBytes := a.([]byte)
+       if isBytes {
+               return string(aBytes), true
+       }
+
+       aStringer, isStringer := a.(fmt.Stringer)
+       if isStringer {
+               return aStringer.String(), true
+       }
+
+       return "", false
+}
+
+func lengthOf(a interface{}) (int, bool) {
+       if a == nil {
+               return 0, false
+       }
+       switch reflect.TypeOf(a).Kind() {
+       case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice:
+               return reflect.ValueOf(a).Len(), true
+       default:
+               return 0, false
+       }
+}
+
+func isNil(a interface{}) bool {
+       if a == nil {
+               return true
+       }
+
+       switch reflect.TypeOf(a).Kind() {
+       case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+               return reflect.ValueOf(a).IsNil()
+       }
+
+       return false
+}
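
These helpers are unexported, but their effect shows through the matchers: toString accepts plain strings, []byte and fmt.Stringer, so a Stringer such as net.IP can be handed straight to the string matchers. Illustrative sketch, not part of the changeset.

    package main

    import (
        "fmt"
        "net"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        ip := net.ParseIP("10.1.2.3") // net.IP implements fmt.Stringer

        m := &matchers.HavePrefixMatcher{Prefix: "10."}
        ok, _ := m.Match(ip) // the matcher stringifies ip via the Stringer branch
        fmt.Println(ok)      // true
    }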
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/matchers/with_transform.go b/Godeps/_workspace/src/github.com/onsi/gomega/matchers/with_transform.go
new file mode 100644 (file)
index 0000000..8e58d8a
--- /dev/null
@@ -0,0 +1,72 @@
+package matchers
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/onsi/gomega/internal/oraclematcher"
+       "github.com/onsi/gomega/types"
+)
+
+type WithTransformMatcher struct {
+       // input
+       Transform interface{} // must be a function of one parameter that returns one value
+       Matcher   types.GomegaMatcher
+
+       // cached value
+       transformArgType reflect.Type
+
+       // state
+       transformedValue interface{}
+}
+
+func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
+       if transform == nil {
+               panic("transform function cannot be nil")
+       }
+       txType := reflect.TypeOf(transform)
+       if txType.NumIn() != 1 {
+               panic("transform function must have 1 argument")
+       }
+       if txType.NumOut() != 1 {
+               panic("transform function must have 1 return value")
+       }
+
+       return &WithTransformMatcher{
+               Transform:        transform,
+               Matcher:          matcher,
+               transformArgType: reflect.TypeOf(transform).In(0),
+       }
+}
+
+func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
+       // return error if actual's type is incompatible with Transform function's argument type
+       actualType := reflect.TypeOf(actual)
+       if !actualType.AssignableTo(m.transformArgType) {
+               return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType)
+       }
+
+       // call the Transform function with `actual`
+       fn := reflect.ValueOf(m.Transform)
+       result := fn.Call([]reflect.Value{reflect.ValueOf(actual)})
+       m.transformedValue = result[0].Interface() // expect exactly one value
+
+       return m.Matcher.Match(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) {
+       return m.Matcher.FailureMessage(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+       return m.Matcher.NegatedFailureMessage(m.transformedValue)
+}
+
+func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool {
+       // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.)
+       //
+       // Querying the next matcher is fine if the transformer always will return the same value.
+       // But if the transformer is non-deterministic and returns a different value each time, then there
+       // is no point in querying the next matcher, since it can only comment on the last transformed value.
+       return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue)
+}
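
Illustrative sketch (not part of this diff): NewWithTransformMatcher applies the transform to the actual value and delegates the result to the inner matcher. The lower-casing transform is invented.

    package main

    import (
        "fmt"
        "strings"

        "github.com/onsi/gomega/matchers"
    )

    func main() {
        // The actual value is transformed, then handed to the inner matcher.
        m := matchers.NewWithTransformMatcher(
            func(name string) string { return strings.ToLower(name) },
            &matchers.HavePrefixMatcher{Prefix: "eth"},
        )
        ok, err := m.Match("ETH0")
        fmt.Println(ok, err) // true <nil>
    }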
diff --git a/Godeps/_workspace/src/github.com/onsi/gomega/types/types.go b/Godeps/_workspace/src/github.com/onsi/gomega/types/types.go
new file mode 100644 (file)
index 0000000..1c632ad
--- /dev/null
@@ -0,0 +1,17 @@
+package types
+
+type GomegaFailHandler func(message string, callerSkip ...int)
+
+//A simple *testing.T interface wrapper
+type GomegaTestingT interface {
+       Errorf(format string, args ...interface{})
+}
+
+//All Gomega matchers must implement the GomegaMatcher interface
+//
+//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding_your_own_matchers
+type GomegaMatcher interface {
+       Match(actual interface{}) (success bool, err error)
+       FailureMessage(actual interface{}) (message string)
+       NegatedFailureMessage(actual interface{}) (message string)
+}
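
Any type with these three methods can be used as a matcher. The sketch below is illustrative and not part of the changeset; BeLoopbackMatcher is an invented name.

    package main

    import (
        "fmt"

        "github.com/onsi/gomega/types"
    )

    // BeLoopbackMatcher is an invented example matcher: it succeeds when the
    // actual value is the interface name "lo".
    type BeLoopbackMatcher struct{}

    func (m *BeLoopbackMatcher) Match(actual interface{}) (bool, error) {
        name, ok := actual.(string)
        if !ok {
            return false, fmt.Errorf("BeLoopback expects a string, got %T", actual)
        }
        return name == "lo", nil
    }

    func (m *BeLoopbackMatcher) FailureMessage(actual interface{}) string {
        return fmt.Sprintf("Expected %#v to be the loopback interface \"lo\"", actual)
    }

    func (m *BeLoopbackMatcher) NegatedFailureMessage(actual interface{}) string {
        return fmt.Sprintf("Expected %#v not to be the loopback interface \"lo\"", actual)
    }

    // Compile-time check that the interface is satisfied.
    var _ types.GomegaMatcher = &BeLoopbackMatcher{}

    func main() {
        var m types.GomegaMatcher = &BeLoopbackMatcher{}
        ok, _ := m.Match("lo")
        fmt.Println(ok) // true
    }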
diff --git a/test b/test
index 6085cfe..fe3f90b 100755 (executable)
--- a/test
+++ b/test
@@ -11,7 +11,7 @@ set -e
 
 source ./build
 
-TESTABLE="plugins/ipam/dhcp"
+TESTABLE="plugins/ipam/dhcp plugins/main/loopback"
 FORMATTABLE="$TESTABLE libcni pkg/ip pkg/ns pkg/invoke pkg/types pkg/ipam pkg/skel plugins/ipam/host-local plugins/main/bridge plugins/meta/flannel plugins/meta/tuning"
 
 # user has not provided PKG override